diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml index b1e0697237d..16170a33c8b 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_e2e.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing - Backups - E2E +name: Backups - E2E - Upgrade Downgrade Testing on: push: pull_request: @@ -10,33 +10,10 @@ concurrency: permissions: read-all jobs: - get_previous_release: - if: always() - name: Get Previous Release - Backups - E2E - runs-on: gh-hosted-runners-16cores-1 - outputs: - previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - upgrade_downgrade_test_e2e: timeout-minutes: 60 - if: always() && needs.get_previous_release.result == 'success' name: Run Upgrade Downgrade Test - Backups - E2E runs-on: gh-hosted-runners-16cores-1 - needs: - - get_previous_release steps: - name: Skip CI @@ -59,6 +36,16 @@ jobs: - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + if: steps.skip-workflow.outputs.skip-workflow == 'false' + id: output-previous-release-ref + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' @@ -117,11 +104,11 @@ jobs: 
sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) + - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/checkout@v3 with: - ref: ${{ needs.get_previous_release.outputs.previous_release }} + ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - name: Get dependencies for the last release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml index 3929e22c536..41f5b40ec1d 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_e2e_next_release.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing - Backups - E2E - Next Release +name: Backups - E2E - Next Release - Upgrade Downgrade Testing on: push: pull_request: @@ -10,33 +10,11 @@ concurrency: permissions: read-all jobs: - get_next_release: - if: always() - name: Get Latest Release - Backups - E2E - Next Release - runs-on: gh-hosted-runners-16cores-1 - outputs: - next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-next-release-ref - run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test_e2e: timeout-minutes: 60 - if: always() && needs.get_next_release.result == 'success' name: Run 
Upgrade Downgrade Test - Backups - E2E - Next Release runs-on: gh-hosted-runners-16cores-1 - needs: - - get_next_release steps: - name: Skip CI @@ -46,6 +24,18 @@ jobs: exit 1 fi + - name: Check out commit's code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + - name: Check if workflow needs to be skipped id: skip-workflow run: | @@ -53,16 +43,12 @@ jobs: if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then skip='true' fi - if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then skip='true' fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - name: Check out commit's code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: frouioui/paths-filter@main @@ -120,11 +106,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) + - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/checkout@v3 with: - ref: ${{ needs.get_next_release.outputs.next_release }} + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - name: Get dependencies for the next release if: 
steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual.yml b/.github/workflows/upgrade_downgrade_test_backups_manual.yml index d3d97b60003..6c6ce2d9036 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_manual.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing - Backups - Manual +name: Backups - Manual - Upgrade Downgrade Testing on: push: pull_request: @@ -10,34 +10,12 @@ concurrency: permissions: read-all jobs: - get_previous_release: - if: always() - name: Get Previous Release - Backups - Manual - runs-on: gh-hosted-runners-16cores-1 - outputs: - previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT # This job usually execute in ± 20 minutes upgrade_downgrade_test_manual: timeout-minutes: 40 - if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Backups - Manual runs-on: gh-hosted-runners-16cores-1 - needs: - - get_previous_release steps: - name: Skip CI @@ -61,6 +39,16 @@ jobs: - name: Checkout to commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-previous-release-ref + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo 
"previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' @@ -138,11 +126,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - - name: Checkout to the other version's code (${{ needs.get_previous_release.outputs.previous_release }}) + - name: Checkout to the other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/checkout@v3 with: - ref: ${{ needs.get_previous_release.outputs.previous_release }} + ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - name: Get dependencies for the last release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml index cd580be954b..7348b6e516d 100644 --- a/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing - Backups - Manual - Next Release +name: Backups - Manual - Next Release - Upgrade Downgrade Testing on: push: pull_request: @@ -10,34 +10,12 @@ concurrency: permissions: read-all jobs: - get_next_release: - if: always() - name: Get Previous Release - Backups - Manual - Next Release - runs-on: gh-hosted-runners-16cores-1 - outputs: - next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-next-release-ref - run: | - next_release_ref=$(./tools/get_next_release.sh 
${{github.base_ref}} ${{github.ref}}) - echo $next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT # This job usually execute in ± 20 minutes upgrade_downgrade_test_manual: timeout-minutes: 40 - if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Backups - Manual - Next Release runs-on: gh-hosted-runners-16cores-1 - needs: - - get_next_release steps: - name: Skip CI @@ -47,6 +25,19 @@ jobs: exit 1 fi + # Checkout to this build's commit + - name: Checkout to commit's code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + - name: Check if workflow needs to be skipped id: skip-workflow run: | @@ -54,17 +45,12 @@ jobs: if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then skip='true' fi - if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then skip='true' fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - # Checkout to this build's commit - - name: Checkout to commit's code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: frouioui/paths-filter@main @@ -141,11 +127,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - - name: Checkout to the other version's code (${{ needs.get_next_release.outputs.next_release }}) + - name: Checkout to the other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/checkout@v3 with: - ref: ${{ needs.get_next_release.outputs.next_release }} + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - name: Get dependencies for the next release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml index 486a806796e..9f1d66ddd35 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Query Serving (Queries) +name: Query Serving (Queries) - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtgate, vttablet, etc) built on different versions. 
jobs: - get_previous_release: - if: always() - name: Get Previous Release - Query Serving (Queries) - runs-on: gh-hosted-runners-16cores-1 - outputs: - previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Queries) runs-on: gh-hosted-runners-16cores-1 - needs: - - get_previous_release steps: - name: Skip CI @@ -61,6 +39,16 @@ jobs: - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-previous-release-ref + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' @@ -135,11 +123,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) + - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/checkout@v3 with: - ref: ${{ 
needs.get_previous_release.outputs.previous_release }} + ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - name: Get dependencies for the last release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml index 3a52a551bdc..0031d2c3b15 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_queries_next_release.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Query Serving (Queries) Next Release +name: Query Serving (Queries) Next Release - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtgate, vttablet, etc) built on different versions. jobs: - get_next_release: - if: always() - name: Get Latest Release - Query Serving (Queries) Next Release - runs-on: gh-hosted-runners-16cores-1 - outputs: - next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-next-release-ref - run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Queries) Next Release runs-on: gh-hosted-runners-16cores-1 - needs: - - get_next_release steps: - name: Skip CI @@ -48,6 +26,18 @@ jobs: exit 1 fi + - name: Check out commit's code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + 
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + - name: Check if workflow needs to be skipped id: skip-workflow run: | @@ -55,16 +45,12 @@ jobs: if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then skip='true' fi - if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then skip='true' fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - name: Check out commit's code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: frouioui/paths-filter@main @@ -138,11 +124,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) + - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/checkout@v3 with: - ref: ${{ needs.get_next_release.outputs.next_release }} + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - name: Get dependencies for the next release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml index 017f65cbaaf..2ed8affaf50 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml +++ 
b/.github/workflows/upgrade_downgrade_test_query_serving_schema.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Query Serving (Schema) +name: Query Serving (Schema) - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtgate, vttablet, etc) built on different versions. jobs: - get_previous_release: - if: always() - name: Get Previous Release - Query Serving (Schema) - runs-on: gh-hosted-runners-16cores-1 - outputs: - previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Schema) runs-on: gh-hosted-runners-16cores-1 - needs: - - get_previous_release steps: - name: Skip CI @@ -61,6 +39,16 @@ jobs: - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-previous-release-ref + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' @@ -135,11 +123,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - - name: Check out other version's code (${{ 
needs.get_previous_release.outputs.previous_release }}) + - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/checkout@v3 with: - ref: ${{ needs.get_previous_release.outputs.previous_release }} + ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - name: Get dependencies for the last release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml index 952e8709da7..40a0b47e0d1 100644 --- a/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml +++ b/.github/workflows/upgrade_downgrade_test_query_serving_schema_next_release.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Query Serving (Schema) Next Release +name: Query Serving (Schema) Next Release - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtgate, vttablet, etc) built on different versions. 
jobs: - get_next_release: - if: always() - name: Get Latest Release - Query Serving (Schema) Next Release - runs-on: gh-hosted-runners-16cores-1 - outputs: - next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-next-release-ref - run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Query Serving (Schema) Next Release runs-on: gh-hosted-runners-16cores-1 - needs: - - get_next_release steps: - name: Skip CI @@ -48,6 +26,18 @@ jobs: exit 1 fi + - name: Check out commit's code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + - name: Check if workflow needs to be skipped id: skip-workflow run: | @@ -55,16 +45,12 @@ jobs: if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then skip='true' fi - if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then skip='true' fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - name: Check out commit's code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: frouioui/paths-filter@main @@ -138,11 +124,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) + - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/checkout@v3 with: - ref: ${{ needs.get_next_release.outputs.next_release }} + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - name: Get dependencies for the next release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml index c305dc91795..671acf0b275 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vtctl.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Reparent New Vtctl +name: Reparent New Vtctl - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtctl, vttablet, etc) built on different versions. 
jobs: - get_next_release: - if: always() - name: Get Latest Release - Reparent New Vtctl - runs-on: gh-hosted-runners-16cores-1 - outputs: - next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-next-release-ref - run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent New Vtctl runs-on: gh-hosted-runners-16cores-1 - needs: - - get_next_release steps: - name: Skip CI @@ -48,6 +26,18 @@ jobs: exit 1 fi + - name: Check out commit's code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + - name: Check if workflow needs to be skipped id: skip-workflow run: | @@ -55,16 +45,12 @@ jobs: if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then skip='true' fi - if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then skip='true' fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - name: Check out commit's code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: frouioui/paths-filter@main @@ -138,11 +124,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) + - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/checkout@v3 with: - ref: ${{ needs.get_next_release.outputs.next_release }} + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - name: Get dependencies for the next release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml index 8f88bee005c..5455c808d8f 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_new_vttablet.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Reparent New VTTablet +name: Reparent New VTTablet - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtctl, vttablet, etc) built on different versions. 
jobs: - get_next_release: - if: always() - name: Get Latest Release - Reparent New VTTablet - runs-on: gh-hosted-runners-16cores-1 - outputs: - next_release: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-next-release-ref - run: | - next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $next_release_ref - echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_next_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent New VTTablet runs-on: gh-hosted-runners-16cores-1 - needs: - - get_next_release steps: - name: Skip CI @@ -48,6 +26,18 @@ jobs: exit 1 fi + - name: Check out commit's code + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-next-release-ref + run: | + next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $next_release_ref + echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT + - name: Check if workflow needs to be skipped id: skip-workflow run: | @@ -55,16 +45,12 @@ jobs: if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then skip='true' fi - if [[ "${{needs.get_next_release.outputs.next_release}}" == "" ]]; then + if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then skip='true' fi echo Skip ${skip} echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT - - name: Check out commit's code - if: steps.skip-workflow.outputs.skip-workflow == 'false' - uses: actions/checkout@v3 - - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: frouioui/paths-filter@main @@ -138,11 +124,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the next release of Vitess - - name: Check out other version's code (${{ needs.get_next_release.outputs.next_release }}) + - name: Check out other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/checkout@v3 with: - ref: ${{ needs.get_next_release.outputs.next_release }} + ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }} - name: Get dependencies for the next release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml index c15f2776a11..44ea01cc7e7 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vtctl.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Reparent Old Vtctl +name: Reparent Old Vtctl - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtctl, vttablet, etc) built on different versions. 
jobs: - get_previous_release: - if: always() - name: Get Previous Release - Reparent Old Vtctl - runs-on: gh-hosted-runners-16cores-1 - outputs: - previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent Old Vtctl runs-on: gh-hosted-runners-16cores-1 - needs: - - get_previous_release steps: - name: Skip CI @@ -61,6 +39,16 @@ jobs: - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-previous-release-ref + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' @@ -135,11 +123,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) + - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/checkout@v3 with: - ref: ${{ needs.get_previous_release.outputs.previous_release }} 
+ ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - name: Get dependencies for the last release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml index b2a9aea7c05..21815bc29ab 100644 --- a/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml +++ b/.github/workflows/upgrade_downgrade_test_reparent_old_vttablet.yml @@ -1,4 +1,4 @@ -name: Upgrade Downgrade Testing Reparent Old VTTablet +name: Reparent Old VTTablet - Upgrade Downgrade Testing on: push: pull_request: @@ -13,32 +13,10 @@ permissions: read-all # (vtctl, vttablet, etc) built on different versions. jobs: - get_previous_release: - if: always() - name: Get Previous Release - Reparent Old VTTablet - runs-on: gh-hosted-runners-16cores-1 - outputs: - previous_release: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - - steps: - - name: Check out to HEAD - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Set output with latest release branch - id: output-previous-release-ref - run: | - previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) - echo $previous_release_ref - echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT upgrade_downgrade_test: - if: always() && (needs.get_previous_release.result == 'success') name: Run Upgrade Downgrade Test - Reparent Old VTTablet runs-on: gh-hosted-runners-16cores-1 - needs: - - get_previous_release steps: - name: Skip CI @@ -61,6 +39,16 @@ jobs: - name: Check out commit's code if: steps.skip-workflow.outputs.skip-workflow == 'false' uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Set output with latest release branch + id: output-previous-release-ref + if: steps.skip-workflow.outputs.skip-workflow == 'false' + run: | + 
previous_release_ref=$(./tools/get_previous_release.sh ${{github.base_ref}} ${{github.ref}}) + echo $previous_release_ref + echo "previous_release_ref=${previous_release_ref}" >> $GITHUB_OUTPUT - name: Check for changes in relevant files if: steps.skip-workflow.outputs.skip-workflow == 'false' @@ -135,11 +123,11 @@ jobs: sudo apt-get install -y percona-xtrabackup-24 # Checkout to the last release of Vitess - - name: Check out other version's code (${{ needs.get_previous_release.outputs.previous_release }}) + - name: Check out other version's code (${{ steps.output-previous-release-ref.outputs.previous_release_ref }}) if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' uses: actions/checkout@v3 with: - ref: ${{ needs.get_previous_release.outputs.previous_release }} + ref: ${{ steps.output-previous-release-ref.outputs.previous_release_ref }} - name: Get dependencies for the last release if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' diff --git a/go.mod b/go.mod index af1ae0a7488..46cfb21af58 100644 --- a/go.mod +++ b/go.mod @@ -71,7 +71,7 @@ require ( go.etcd.io/etcd/client/pkg/v3 v3.5.8 go.etcd.io/etcd/client/v3 v3.5.8 go.uber.org/mock v0.2.0 - golang.org/x/crypto v0.16.0 // indirect + golang.org/x/crypto v0.17.0 // indirect golang.org/x/mod v0.12.0 // indirect golang.org/x/net v0.19.0 golang.org/x/oauth2 v0.11.0 diff --git a/go.sum b/go.sum index 1df92514455..95f6c6037a4 100644 --- a/go.sum +++ b/go.sum @@ -663,8 +663,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.16.0 
h1:mMMrFzRSCF0GvB7Ne27XVtVAaXLrPmgPC7/v0tkwHaY= -golang.org/x/crypto v0.16.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= +golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= diff --git a/go/cmd/mysqlctl/command/init.go b/go/cmd/mysqlctl/command/init.go index 71a9661aa80..14d8e5f6d29 100644 --- a/go/cmd/mysqlctl/command/init.go +++ b/go/cmd/mysqlctl/command/init.go @@ -49,7 +49,7 @@ var initArgs = struct { func commandInit(cmd *cobra.Command, args []string) error { // Generate my.cnf from scratch and use it to find mysqld. - mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort) + mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort, collationEnv) if err != nil { return fmt.Errorf("failed to initialize mysql config: %v", err) } diff --git a/go/cmd/mysqlctl/command/init_config.go b/go/cmd/mysqlctl/command/init_config.go index 70e751e02cb..36687482e08 100644 --- a/go/cmd/mysqlctl/command/init_config.go +++ b/go/cmd/mysqlctl/command/init_config.go @@ -40,7 +40,7 @@ var InitConfig = &cobra.Command{ func commandInitConfig(cmd *cobra.Command, args []string) error { // Generate my.cnf from scratch and use it to find mysqld. 
- mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort) + mysqld, cnf, err := mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort, collationEnv) if err != nil { return fmt.Errorf("failed to initialize mysql config: %v", err) } diff --git a/go/cmd/mysqlctl/command/reinit_config.go b/go/cmd/mysqlctl/command/reinit_config.go index b06642c8203..fd7523c0411 100644 --- a/go/cmd/mysqlctl/command/reinit_config.go +++ b/go/cmd/mysqlctl/command/reinit_config.go @@ -41,7 +41,7 @@ var ReinitConfig = &cobra.Command{ func commandReinitConfig(cmd *cobra.Command, args []string) error { // There ought to be an existing my.cnf, so use it to find mysqld. - mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) + mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID, collationEnv) if err != nil { return fmt.Errorf("failed to find mysql config: %v", err) } diff --git a/go/cmd/mysqlctl/command/root.go b/go/cmd/mysqlctl/command/root.go index 4f5626ef7e6..78b3a623666 100644 --- a/go/cmd/mysqlctl/command/root.go +++ b/go/cmd/mysqlctl/command/root.go @@ -23,21 +23,22 @@ import ( "vitess.io/vitess/go/acl" vtcmd "vitess.io/vitess/go/cmd" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" ) var ( - mysqlPort = 3306 - tabletUID = uint32(41983) - mysqlSocket string + mysqlPort = 3306 + tabletUID = uint32(41983) + mysqlSocket string + collationEnv *collations.Environment Root = &cobra.Command{ Use: "mysqlctl", Short: "mysqlctl initializes and controls mysqld with Vitess-specific configuration.", Long: "`mysqlctl` is a command-line client used for managing `mysqld` instances.\n\n" + - "It is responsible for bootstrapping tasks such as generating a configuration file for `mysqld` and initializing the instance and its data directory.\n" + "The `mysqld_safe` watchdog is utilized when present.\n" + "This helps ensure that `mysqld` is automatically restarted 
after failures.", @@ -74,4 +75,6 @@ func init() { Root.PersistentFlags().StringVar(&mysqlSocket, "mysql_socket", mysqlSocket, "Path to the mysqld socket file.") acl.RegisterFlags(Root.PersistentFlags()) + + collationEnv = collations.NewEnvironment(servenv.MySQLServerVersion()) } diff --git a/go/cmd/mysqlctl/command/shutdown.go b/go/cmd/mysqlctl/command/shutdown.go index 6e2e3a74a61..321d4a9b35f 100644 --- a/go/cmd/mysqlctl/command/shutdown.go +++ b/go/cmd/mysqlctl/command/shutdown.go @@ -44,7 +44,7 @@ var shutdownArgs = struct { func commandShutdown(cmd *cobra.Command, args []string) error { // There ought to be an existing my.cnf, so use it to find mysqld. - mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) + mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID, collationEnv) if err != nil { return fmt.Errorf("failed to find mysql config: %v", err) } diff --git a/go/cmd/mysqlctl/command/start.go b/go/cmd/mysqlctl/command/start.go index 397909e0966..aef404d0a8e 100644 --- a/go/cmd/mysqlctl/command/start.go +++ b/go/cmd/mysqlctl/command/start.go @@ -45,7 +45,7 @@ var startArgs = struct { func commandStart(cmd *cobra.Command, args []string) error { // There ought to be an existing my.cnf, so use it to find mysqld. - mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) + mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID, collationEnv) if err != nil { return fmt.Errorf("failed to find mysql config: %v", err) } diff --git a/go/cmd/mysqlctl/command/teardown.go b/go/cmd/mysqlctl/command/teardown.go index 4ad0539bdd1..89d7b3b5f6d 100644 --- a/go/cmd/mysqlctl/command/teardown.go +++ b/go/cmd/mysqlctl/command/teardown.go @@ -47,7 +47,7 @@ var teardownArgs = struct { func commandTeardown(cmd *cobra.Command, args []string) error { // There ought to be an existing my.cnf, so use it to find mysqld. 
- mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID) + mysqld, cnf, err := mysqlctl.OpenMysqldAndMycnf(tabletUID, collationEnv) if err != nil { return fmt.Errorf("failed to find mysql config: %v", err) } diff --git a/go/cmd/mysqlctld/cli/mysqlctld.go b/go/cmd/mysqlctld/cli/mysqlctld.go index 51a0c47f56e..7a5ff6a5ce6 100644 --- a/go/cmd/mysqlctld/cli/mysqlctld.go +++ b/go/cmd/mysqlctld/cli/mysqlctld.go @@ -28,6 +28,7 @@ import ( "github.com/spf13/cobra" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -40,9 +41,10 @@ var ( mysqld *mysqlctl.Mysqld cnf *mysqlctl.Mycnf - mysqlPort = 3306 - tabletUID = uint32(41983) - mysqlSocket string + mysqlPort = 3306 + tabletUID = uint32(41983) + mysqlSocket string + collationEnv *collations.Environment // mysqlctl init flags waitTime = 5 * time.Minute @@ -90,6 +92,8 @@ func init() { Main.Flags().DurationVar(&shutdownWaitTime, "shutdown-wait-time", shutdownWaitTime, "How long to wait for mysqld shutdown") acl.RegisterFlags(Main.Flags()) + + collationEnv = collations.NewEnvironment(servenv.MySQLServerVersion()) } func run(cmd *cobra.Command, args []string) error { @@ -110,7 +114,7 @@ func run(cmd *cobra.Command, args []string) error { log.Infof("mycnf file (%s) doesn't exist, initializing", mycnfFile) var err error - mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort) + mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(tabletUID, mysqlSocket, mysqlPort, collationEnv) if err != nil { cancel() return fmt.Errorf("failed to initialize mysql config: %w", err) @@ -126,7 +130,7 @@ func run(cmd *cobra.Command, args []string) error { log.Infof("mycnf file (%s) already exists, starting without init", mycnfFile) var err error - mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(tabletUID) + mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(tabletUID, collationEnv) if err != nil { cancel() return 
fmt.Errorf("failed to find mysql config: %w", err) diff --git a/go/cmd/topo2topo/cli/topo2topo.go b/go/cmd/topo2topo/cli/topo2topo.go index 6e7e173872b..5dda62eaed1 100644 --- a/go/cmd/topo2topo/cli/topo2topo.go +++ b/go/cmd/topo2topo/cli/topo2topo.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/vt/grpccommon" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/helpers" ) @@ -94,12 +95,21 @@ func run(cmd *cobra.Command, args []string) error { return compareTopos(ctx, fromTS, toTS) } - return copyTopos(ctx, fromTS, toTS) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return fmt.Errorf("cannot create sqlparser: %w", err) + } + + return copyTopos(ctx, fromTS, toTS, parser) } -func copyTopos(ctx context.Context, fromTS, toTS *topo.Server) error { +func copyTopos(ctx context.Context, fromTS, toTS *topo.Server, parser *sqlparser.Parser) error { if doKeyspaces { - if err := helpers.CopyKeyspaces(ctx, fromTS, toTS); err != nil { + if err := helpers.CopyKeyspaces(ctx, fromTS, toTS, parser); err != nil { return err } } diff --git a/go/cmd/vtadmin/main.go b/go/cmd/vtadmin/main.go index 210e2edb918..224a6dbeacf 100644 --- a/go/cmd/vtadmin/main.go +++ b/go/cmd/vtadmin/main.go @@ -24,10 +24,13 @@ import ( "github.com/spf13/cobra" + _flag "vitess.io/vitess/go/internal/flag" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtadmin" "vitess.io/vitess/go/vt/vtadmin/cache" "vitess.io/vitess/go/vt/vtadmin/cluster" @@ -35,8 +38,6 @@ import ( vtadminhttp "vitess.io/vitess/go/vt/vtadmin/http" "vitess.io/vitess/go/vt/vtadmin/http/debug" 
"vitess.io/vitess/go/vt/vtadmin/rbac" - - _flag "vitess.io/vitess/go/internal/flag" ) var ( @@ -138,13 +139,22 @@ func run(cmd *cobra.Command, args []string) { log.Warningf("no cache-refresh-key set; forcing cache refreshes will not be possible") } cache.SetCacheRefreshKey(cacheRefreshKey) + collationEnv := collations.NewEnvironment(servenv.MySQLServerVersion()) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + fatal(err) + } s := vtadmin.NewAPI(clusters, vtadmin.Options{ GRPCOpts: opts, HTTPOpts: httpOpts, RBAC: rbacConfig, EnableDynamicClusters: enableDynamicClusters, - }) + }, collationEnv, parser) bootSpan.Finish() if err := s.ListenAndServe(); err != nil { @@ -208,6 +218,8 @@ func main() { rootCmd.Flags().AddGoFlag(flag.Lookup("stderrthreshold")) rootCmd.Flags().AddGoFlag(flag.Lookup("log_dir")) + servenv.RegisterMySQLServerFlags(rootCmd.Flags()) + if err := rootCmd.Execute(); err != nil { log.Fatal(err) } diff --git a/go/cmd/vtbackup/cli/vtbackup.go b/go/cmd/vtbackup/cli/vtbackup.go index d55cf643de4..4700d93eea1 100644 --- a/go/cmd/vtbackup/cli/vtbackup.go +++ b/go/cmd/vtbackup/cli/vtbackup.go @@ -29,11 +29,11 @@ import ( "github.com/spf13/cobra" - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/acl" "vitess.io/vitess/go/cmd" "vitess.io/vitess/go/exit" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" @@ -93,6 +93,8 @@ var ( keepAliveTimeout time.Duration disableRedoLog bool + collationEnv *collations.Environment + // Deprecated, use "Phase" instead. 
deprecatedDurationByPhase = stats.NewGaugesWithSingleLabel( "DurationByPhaseSeconds", @@ -215,6 +217,8 @@ func init() { Main.Flags().BoolVar(&disableRedoLog, "disable-redo-log", disableRedoLog, "Disable InnoDB redo log during replication-from-primary phase of backup.") acl.RegisterFlags(Main.Flags()) + + collationEnv = collations.NewEnvironment(servenv.MySQLServerVersion()) } func run(_ *cobra.Command, args []string) error { @@ -327,7 +331,7 @@ func takeBackup(ctx context.Context, topoServer *topo.Server, backupStorage back }() // Start up mysqld as if we are mysqlctld provisioning a fresh tablet. - mysqld, mycnf, err := mysqlctl.CreateMysqldAndMycnf(tabletAlias.Uid, mysqlSocket, mysqlPort) + mysqld, mycnf, err := mysqlctl.CreateMysqldAndMycnf(tabletAlias.Uid, mysqlSocket, mysqlPort, collationEnv) if err != nil { return fmt.Errorf("failed to initialize mysql config: %v", err) } diff --git a/go/cmd/vtcombo/cli/main.go b/go/cmd/vtcombo/cli/main.go index 6912a886b18..35620e2bd9a 100644 --- a/go/cmd/vtcombo/cli/main.go +++ b/go/cmd/vtcombo/cli/main.go @@ -31,12 +31,14 @@ import ( "github.com/spf13/cobra" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -78,7 +80,9 @@ In particular, it contains: tpb vttestpb.VTTestTopology ts *topo.Server + collationEnv *collations.Environment resilientServer *srvtopo.ResilientServer + parser *sqlparser.Parser ) func init() { @@ -114,6 +118,8 @@ func init() { // We're going to force the value later, so don't even bother letting the // user know about this flag. 
Main.Flags().MarkHidden("tablet_protocol") + + collationEnv = collations.NewEnvironment(servenv.MySQLServerVersion()) } func startMysqld(uid uint32) (mysqld *mysqlctl.Mysqld, cnf *mysqlctl.Mycnf, err error) { @@ -123,7 +129,7 @@ func startMysqld(uid uint32) (mysqld *mysqlctl.Mysqld, cnf *mysqlctl.Mycnf, err mycnfFile := mysqlctl.MycnfFile(uid) if _, statErr := os.Stat(mycnfFile); os.IsNotExist(statErr) { - mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(uid, "", mysqlPort) + mysqld, cnf, err = mysqlctl.CreateMysqldAndMycnf(uid, "", mysqlPort, collationEnv) if err != nil { return nil, nil, fmt.Errorf("failed to initialize mysql config :%w", err) } @@ -131,7 +137,7 @@ func startMysqld(uid uint32) (mysqld *mysqlctl.Mysqld, cnf *mysqlctl.Mycnf, err return nil, nil, fmt.Errorf("failed to initialize mysql :%w", err) } } else { - mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(uid) + mysqld, cnf, err = mysqlctl.OpenMysqldAndMycnf(uid, collationEnv) if err != nil { return nil, nil, fmt.Errorf("failed to find mysql config: %w", err) } @@ -186,6 +192,15 @@ func run(cmd *cobra.Command, args []string) (err error) { servenv.Init() tabletenv.Init() + parser, err = sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return fmt.Errorf("failed to initialize sql parser: %w", err) + } + var ( mysqld = &vtcomboMysqld{} cnf *mysqlctl.Mycnf @@ -205,7 +220,7 @@ func run(cmd *cobra.Command, args []string) (err error) { mysqld.SetReadOnly(false) } else { - dbconfigs.GlobalDBConfigs.InitWithSocket("") + dbconfigs.GlobalDBConfigs.InitWithSocket("", collationEnv) mysqld.Mysqld = mysqlctl.NewMysqld(&dbconfigs.GlobalDBConfigs) servenv.OnClose(mysqld.Close) } @@ -217,7 +232,7 @@ func run(cmd *cobra.Command, args []string) (err error) { // to be the "internal" protocol that InitTabletMap registers. 
cmd.Flags().Set("tablet_manager_protocol", "internal") cmd.Flags().Set("tablet_protocol", "internal") - uid, err := vtcombo.InitTabletMap(ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, startMysql) + uid, err := vtcombo.InitTabletMap(ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, startMysql, collationEnv, parser) if err != nil { // ensure we start mysql in the event we fail here if startMysql { @@ -242,8 +257,8 @@ func run(cmd *cobra.Command, args []string) (err error) { } } - wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil) - newUID, err := vtcombo.CreateKs(ctx, ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, ks, true, uid, wr) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil, collationEnv, parser) + newUID, err := vtcombo.CreateKs(ctx, ts, &tpb, mysqld, &dbconfigs.GlobalDBConfigs, schemaDir, ks, true, uid, wr, collationEnv, parser) if err != nil { return err } @@ -291,11 +306,12 @@ func run(cmd *cobra.Command, args []string) (err error) { vtgate.QueryLogHandler = "/debug/vtgate/querylog" vtgate.QueryLogzHandler = "/debug/vtgate/querylogz" vtgate.QueryzHandler = "/debug/vtgate/queryz" + // pass nil for healthcheck, it will get created - vtg := vtgate.Init(context.Background(), nil, resilientServer, tpb.Cells[0], tabletTypesToWait, plannerVersion) + vtg := vtgate.Init(context.Background(), nil, resilientServer, tpb.Cells[0], tabletTypesToWait, plannerVersion, collationEnv) // vtctld configuration and init - err = vtctld.InitVtctld(ts) + err = vtctld.InitVtctld(ts, collationEnv, parser) if err != nil { return err } diff --git a/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go b/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go index 2cf8eed8368..62a5e2bb358 100644 --- a/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go +++ b/go/cmd/vtcombo/cli/plugin_grpcvtctldserver.go @@ -24,7 +24,7 @@ import ( func init() { servenv.OnRun(func() { if servenv.GRPCCheckServiceMap("vtctld") { - grpcvtctldserver.StartServer(servenv.GRPCServer, ts) + 
grpcvtctldserver.StartServer(servenv.GRPCServer, ts, parser) } }) } diff --git a/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go b/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go index 8b7f918bc58..e7f7b1b7302 100644 --- a/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go +++ b/go/cmd/vtcombo/cli/plugin_grpcvtctlserver.go @@ -24,7 +24,7 @@ import ( func init() { servenv.OnRun(func() { if servenv.GRPCCheckServiceMap("vtctl") { - grpcvtctlserver.StartServer(servenv.GRPCServer, ts) + grpcvtctlserver.StartServer(servenv.GRPCServer, ts, collationEnv, parser) } }) } diff --git a/go/cmd/vtcombo/cli/vschema_watcher.go b/go/cmd/vtcombo/cli/vschema_watcher.go index c1c9f120b96..e573109ab9e 100644 --- a/go/cmd/vtcombo/cli/vschema_watcher.go +++ b/go/cmd/vtcombo/cli/vschema_watcher.go @@ -63,7 +63,7 @@ func loadKeyspacesFromDir(dir string, keyspaces []*vttestpb.Keyspace, ts *topo.S log.Fatalf("Unable to parse keyspace file %v: %v", ksFile, err) } - _, err = vindexes.BuildKeyspace(keyspace) + _, err = vindexes.BuildKeyspace(keyspace, parser) if err != nil { log.Fatalf("Invalid keyspace definition: %v", err) } diff --git a/go/cmd/vtctl/vtctl.go b/go/cmd/vtctl/vtctl.go index e95f484cf4f..8979028ea23 100644 --- a/go/cmd/vtctl/vtctl.go +++ b/go/cmd/vtctl/vtctl.go @@ -31,10 +31,12 @@ import ( "vitess.io/vitess/go/cmd" "vitess.io/vitess/go/cmd/vtctldclient/command" "vitess.io/vitess/go/exit" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" @@ -126,10 +128,18 @@ func main() { ts := topo.Open() defer ts.Close() - ctx, cancel := context.WithTimeout(context.Background(), waitTime) installSignalHandlers(cancel) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: 
servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatalf("cannot initialize sql parser: %v", err) + } + // (TODO:ajm188) . // // For v12, we are going to support new commands by prefixing as: @@ -154,7 +164,7 @@ func main() { // New behavior. Strip off the prefix, and set things up to run through // the vtctldclient command tree, using the localvtctldclient (in-process) // client. - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(ts, parser) localvtctldclient.SetServer(vtctld) command.VtctldClientProtocol = "local" @@ -170,8 +180,8 @@ func main() { fallthrough default: log.Warningf("WARNING: vtctl should only be used for VDiff v1 workflows. Please use VDiff v2 and consider using vtctldclient for all other commands.") - - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + collationEnv := collations.NewEnvironment(servenv.MySQLServerVersion()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collationEnv, parser) if args[0] == "--" { vtctl.PrintDoubleDashDeprecationNotice(wr) @@ -179,7 +189,7 @@ func main() { } action = args[0] - err := vtctl.RunCommand(ctx, wr, args) + err = vtctl.RunCommand(ctx, wr, args) cancel() switch err { case vtctl.ErrUnknownCommand: diff --git a/go/cmd/vtctld/cli/cli.go b/go/cmd/vtctld/cli/cli.go index f7fef555896..b0135707512 100644 --- a/go/cmd/vtctld/cli/cli.go +++ b/go/cmd/vtctld/cli/cli.go @@ -20,14 +20,19 @@ import ( "github.com/spf13/cobra" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctld" ) var ( - ts *topo.Server - Main = &cobra.Command{ + ts *topo.Server + collationEnv *collations.Environment + parser *sqlparser.Parser + Main = &cobra.Command{ Use: "vtctld", Short: "The Vitess cluster 
management daemon.", Long: `vtctld provides web and gRPC interfaces to manage a single Vitess cluster. @@ -59,8 +64,18 @@ func run(cmd *cobra.Command, args []string) error { ts = topo.Open() defer ts.Close() + var err error + collationEnv = collations.NewEnvironment(servenv.MySQLServerVersion()) + parser, err = sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return err + } // Init the vtctld core - if err := vtctld.InitVtctld(ts); err != nil { + if err := vtctld.InitVtctld(ts, collationEnv, parser); err != nil { return err } @@ -86,4 +101,14 @@ func init() { servenv.MoveFlagsToCobraCommand(Main) acl.RegisterFlags(Main.Flags()) + + var err error + parser, err = sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatalf("cannot initialize sql parser: %v", err) + } } diff --git a/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go b/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go index ff283d91336..3385160e9f8 100644 --- a/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go +++ b/go/cmd/vtctld/cli/plugin_grpcvtctldserver.go @@ -24,7 +24,7 @@ import ( func init() { servenv.OnRun(func() { if servenv.GRPCCheckServiceMap("vtctld") { - grpcvtctldserver.StartServer(servenv.GRPCServer, ts) + grpcvtctldserver.StartServer(servenv.GRPCServer, ts, parser) } }) } diff --git a/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go b/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go index 8b7f918bc58..e7f7b1b7302 100644 --- a/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go +++ b/go/cmd/vtctld/cli/plugin_grpcvtctlserver.go @@ -24,7 +24,7 @@ import ( func init() { servenv.OnRun(func() { if servenv.GRPCCheckServiceMap("vtctl") { - grpcvtctlserver.StartServer(servenv.GRPCServer, ts) + grpcvtctlserver.StartServer(servenv.GRPCServer, ts, 
collationEnv, parser) } }) } diff --git a/go/cmd/vtctld/cli/schema.go b/go/cmd/vtctld/cli/schema.go index 68dc47b2b6f..9092dbf03bd 100644 --- a/go/cmd/vtctld/cli/schema.go +++ b/go/cmd/vtctld/cli/schema.go @@ -71,11 +71,11 @@ func initSchema() { return } ctx := context.Background() - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collationEnv, parser) _, err = schemamanager.Run( ctx, controller, - schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout, 0), + schemamanager.NewTabletExecutor("vtctld/schema", wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), schemaChangeReplicasTimeout, 0, parser), ) if err != nil { log.Errorf("Schema change failed, error: %v", err) diff --git a/go/cmd/vtctldclient/cli/pflag.go b/go/cmd/vtctldclient/cli/pflag.go index 04d202cd644..f985e74901e 100644 --- a/go/cmd/vtctldclient/cli/pflag.go +++ b/go/cmd/vtctldclient/cli/pflag.go @@ -19,23 +19,11 @@ package cli import ( "github.com/spf13/pflag" - "vitess.io/vitess/go/flagutil" "vitess.io/vitess/go/vt/topo/topoproto" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) -// StringMapValue augments flagutil.StringMapValue so it can be used as a -// pflag.Value. -type StringMapValue struct { - flagutil.StringMapValue -} - -// Type is part of the pflag.Value interface. -func (v *StringMapValue) Type() string { - return "cli.StringMapValue" -} - // KeyspaceTypeFlag adds the pflag.Value interface to a topodatapb.KeyspaceType. 
type KeyspaceTypeFlag topodatapb.KeyspaceType diff --git a/go/cmd/vtctldclient/command/root.go b/go/cmd/vtctldclient/command/root.go index a5848a7b42a..9e6b2df170b 100644 --- a/go/cmd/vtctldclient/command/root.go +++ b/go/cmd/vtctldclient/command/root.go @@ -29,8 +29,10 @@ import ( "github.com/spf13/cobra" "vitess.io/vitess/go/trace" + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" "vitess.io/vitess/go/vt/vtctl/localvtctldclient" @@ -80,6 +82,8 @@ var ( actionTimeout time.Duration compactOutput bool + parser *sqlparser.Parser + topoOptions = struct { implementation string globalServerAddresses []string @@ -208,7 +212,7 @@ func getClientForCommand(cmd *cobra.Command) (vtctldclient.VtctldClient, error) return nil }) }) - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(ts, parser) localvtctldclient.SetServer(vtctld) VtctldClientProtocol = "local" server = "" @@ -225,4 +229,14 @@ func init() { Root.PersistentFlags().StringSliceVar(&topoOptions.globalServerAddresses, "topo-global-server-address", topoOptions.globalServerAddresses, "the address of the global topology server(s)") Root.PersistentFlags().StringVar(&topoOptions.globalRoot, "topo-global-root", topoOptions.globalRoot, "the path of the global topology data in the global topology server") vreplcommon.RegisterCommands(Root) + + var err error + parser, err = sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatalf("failed to initialize sqlparser: %v", err) + } } diff --git a/go/cmd/vtctldclient/command/schema.go b/go/cmd/vtctldclient/command/schema.go index 2d31e3500c1..4a46108ba26 100644 --- a/go/cmd/vtctldclient/command/schema.go +++ 
b/go/cmd/vtctldclient/command/schema.go @@ -29,7 +29,6 @@ import ( "vitess.io/vitess/go/protoutil" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/schema" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" @@ -123,7 +122,7 @@ func commandApplySchema(cmd *cobra.Command, args []string) error { allSQL = strings.Join(applySchemaOptions.SQL, ";") } - parts, err := sqlparser.SplitStatementToPieces(allSQL) + parts, err := parser.SplitStatementToPieces(allSQL) if err != nil { return err } diff --git a/go/cmd/vtctldclient/command/vreplication/common/utils.go b/go/cmd/vtctldclient/command/vreplication/common/utils.go index da6e3329579..02dc88ae769 100644 --- a/go/cmd/vtctldclient/command/vreplication/common/utils.go +++ b/go/cmd/vtctldclient/command/vreplication/common/utils.go @@ -64,6 +64,9 @@ var ( DeferSecondaryKeys bool AutoStart bool StopAfterCopy bool + MySQLServerVersion string + TruncateUILen int + TruncateErrLen int }{} ) diff --git a/go/cmd/vtctldclient/command/vreplication/common/utils_test.go b/go/cmd/vtctldclient/command/vreplication/common/utils_test.go index 0dc179060d6..0660cb6d742 100644 --- a/go/cmd/vtctldclient/command/vreplication/common/utils_test.go +++ b/go/cmd/vtctldclient/command/vreplication/common/utils_test.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/cmd/vtctldclient/command" "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" @@ -144,7 +145,7 @@ func SetupLocalVtctldClient(t *testing.T, ctx context.Context, cells ...string) tmclient.RegisterTabletManagerClientFactory("grpc", func() tmclient.TabletManagerClient { return nil }) - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) localvtctldclient.SetServer(vtctld) 
command.VtctldClientProtocol = "local" client, err := vtctldclient.New(command.VtctldClientProtocol, "") diff --git a/go/cmd/vtctldclient/command/vreplication/materialize/create.go b/go/cmd/vtctldclient/command/vreplication/materialize/create.go index 51f3ee42ee9..88aed1c664c 100644 --- a/go/cmd/vtctldclient/command/vreplication/materialize/create.go +++ b/go/cmd/vtctldclient/command/vreplication/materialize/create.go @@ -102,6 +102,15 @@ func commandCreate(cmd *cobra.Command, args []string) error { TabletSelectionPreference: tsp, } + createOptions.TableSettings.parser, err = sqlparser.New(sqlparser.Options{ + MySQLServerVersion: common.CreateOptions.MySQLServerVersion, + TruncateUILen: common.CreateOptions.TruncateUILen, + TruncateErrLen: common.CreateOptions.TruncateErrLen, + }) + if err != nil { + return err + } + req := &vtctldatapb.MaterializeCreateRequest{ Settings: ms, } @@ -132,7 +141,8 @@ func commandCreate(cmd *cobra.Command, args []string) error { // tableSettings is a wrapper around a slice of TableMaterializeSettings // proto messages that implements the pflag.Value interface. type tableSettings struct { - val []*vtctldatapb.TableMaterializeSettings + val []*vtctldatapb.TableMaterializeSettings + parser *sqlparser.Parser } func (ts *tableSettings) String() string { @@ -157,7 +167,7 @@ func (ts *tableSettings) Set(v string) error { return fmt.Errorf("missing target_table or source_expression") } // Validate that the query is valid. 
- stmt, err := sqlparser.Parse(tms.SourceExpression) + stmt, err := ts.parser.Parse(tms.SourceExpression) if err != nil { return fmt.Errorf("invalid source_expression: %q", tms.SourceExpression) } diff --git a/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go b/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go index 58be1ec4433..5845504af3f 100644 --- a/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go +++ b/go/cmd/vtctldclient/command/vreplication/materialize/materialize.go @@ -17,9 +17,12 @@ limitations under the License. package materialize import ( + "fmt" + "github.com/spf13/cobra" "vitess.io/vitess/go/cmd/vtctldclient/command/vreplication/common" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/vt/topo/topoproto" ) @@ -46,6 +49,9 @@ func registerCommands(root *cobra.Command) { create.Flags().Var(&createOptions.TableSettings, "table-settings", "A JSON array defining what tables to materialize using what select statements. See the --help output for more details.") create.MarkFlagRequired("table-settings") create.Flags().BoolVar(&common.CreateOptions.StopAfterCopy, "stop-after-copy", false, "Stop the workflow after it's finished copying the existing rows and before it starts replicating changes.") + create.Flags().StringVar(&common.CreateOptions.MySQLServerVersion, "mysql_server_version", fmt.Sprintf("%s-Vitess", config.DefaultMySQLVersion), "Configure the MySQL version to use for example for the parser.") + create.Flags().IntVar(&common.CreateOptions.TruncateUILen, "sql-max-length-ui", 512, "truncate queries in debug UIs to the given length (default 512)") + create.Flags().IntVar(&common.CreateOptions.TruncateErrLen, "sql-max-length-errors", 0, "truncate queries in error logs to the given length (default unlimited)") base.AddCommand(create) // Generic workflow commands. 
diff --git a/go/cmd/vtctldclient/command/vreplication/mount/mount.go b/go/cmd/vtctldclient/command/vreplication/mount/mount.go index 95ce3961e71..33bc69e5626 100644 --- a/go/cmd/vtctldclient/command/vreplication/mount/mount.go +++ b/go/cmd/vtctldclient/command/vreplication/mount/mount.go @@ -143,9 +143,7 @@ func commandList(cmd *cobra.Command, args []string) error { if err != nil { return err } - if err != nil { - return err - } + data, err := json.Marshal(resp) if err != nil { return err diff --git a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go index 1a2a374cf81..23a4f2e0bbd 100644 --- a/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go +++ b/go/cmd/vtctldclient/command/vreplication/vdiff/vdiff_env_test.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/grpcclient" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl/workflow" @@ -83,7 +84,7 @@ func newTestVDiffEnv(t testing.TB, ctx context.Context, sourceShards, targetShar tabletType: topodatapb.TabletType_REPLICA, tmc: newTestVDiffTMClient(), } - env.ws = workflow.NewServer(env.topoServ, env.tmc) + env.ws = workflow.NewServer(env.topoServ, env.tmc, sqlparser.NewTestParser()) env.tmc.testEnv = env // Generate a unique dialer name. 
diff --git a/go/cmd/vtexplain/cli/vtexplain.go b/go/cmd/vtexplain/cli/vtexplain.go index 8b0622cf8a3..ee71336a8d7 100644 --- a/go/cmd/vtexplain/cli/vtexplain.go +++ b/go/cmd/vtexplain/cli/vtexplain.go @@ -22,8 +22,10 @@ import ( "os" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtexplain" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" @@ -78,9 +80,7 @@ If no keyspace name is present, VTExplain will return the following error: ` + "```\n", Example: "Explain how Vitess will execute the query `SELECT * FROM users` using the VSchema contained in `vschemas.json` and the database schema `schema.sql`:\n\n" + "```\nvtexplain --vschema-file vschema.json --schema-file schema.sql --sql \"SELECT * FROM users\"\n```\n\n" + - "Explain how the example will execute on 128 shards using Row-based replication:\n\n" + - "```\nvtexplain -- -shards 128 --vschema-file vschema.json --schema-file schema.sql --replication-mode \"ROW\" --output-mode text --sql \"INSERT INTO users (user_id, name) VALUES(1, 'john')\"\n```\n", Args: cobra.NoArgs, PreRunE: servenv.CobraPreRunE, @@ -175,7 +175,16 @@ func parseAndRun() error { Target: dbName, } - vte, err := vtexplain.Init(context.Background(), vschema, schema, ksShardMap, opts) + collationEnv := collations.NewEnvironment(servenv.MySQLServerVersion()) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return err + } + vte, err := vtexplain.Init(context.Background(), vschema, schema, ksShardMap, opts, collationEnv, parser) if err != nil { return err } diff --git a/go/cmd/vtgate/cli/cli.go b/go/cmd/vtgate/cli/cli.go index bcd280890e5..c81570524f5 100644 --- a/go/cmd/vtgate/cli/cli.go +++ b/go/cmd/vtgate/cli/cli.go @@ -23,6 +23,8 @@ import 
( "github.com/spf13/cobra" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/discovery" @@ -157,9 +159,10 @@ func run(cmd *cobra.Command, args []string) error { } plannerVersion, _ := plancontext.PlannerNameToVersion(plannerName) + collationEnv := collations.NewEnvironment(servenv.MySQLServerVersion()) // pass nil for HealthCheck and it will be created - vtg := vtgate.Init(context.Background(), nil, resilientServer, cell, tabletTypes, plannerVersion) + vtg := vtgate.Init(context.Background(), nil, resilientServer, cell, tabletTypes, plannerVersion, collationEnv) servenv.OnRun(func() { // Flags are parsed now. Parse the template using the actual flag value and overwrite the current template. diff --git a/go/cmd/vttablet/cli/cli.go b/go/cmd/vttablet/cli/cli.go index d68856be9b6..80c1a904419 100644 --- a/go/cmd/vttablet/cli/cli.go +++ b/go/cmd/vttablet/cli/cli.go @@ -26,11 +26,13 @@ import ( "github.com/spf13/cobra" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/binlog" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tableacl" "vitess.io/vitess/go/vt/tableacl/simpleacl" "vitess.io/vitess/go/vt/topo" @@ -110,14 +112,24 @@ func run(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to parse --tablet-path: %w", err) } + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return fmt.Errorf("cannot initialize sql parser: %w", err) + } + + collationEnv := collations.NewEnvironment(servenv.MySQLServerVersion()) // config and mycnf initializations are intertwined. 
- config, mycnf, err := initConfig(tabletAlias) + config, mycnf, err := initConfig(tabletAlias, collationEnv) if err != nil { return err } ts := topo.Open() - qsc, err := createTabletServer(context.Background(), config, ts, tabletAlias) + qsc, err := createTabletServer(context.Background(), config, ts, tabletAlias, collationEnv, parser) if err != nil { ts.Close() return err @@ -131,12 +143,20 @@ func run(cmd *cobra.Command, args []string) error { return fmt.Errorf("failed to extract online DDL binaries: %w", err) } + parser, err = sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + return fmt.Errorf("cannot initialize sql parser: %w", err) + } // Initialize and start tm. gRPCPort := int32(0) if servenv.GRPCPort() != 0 { gRPCPort = int32(servenv.GRPCPort()) } - tablet, err := tabletmanager.BuildTabletFromInput(tabletAlias, int32(servenv.Port()), gRPCPort, config.DB) + tablet, err := tabletmanager.BuildTabletFromInput(tabletAlias, int32(servenv.Port()), gRPCPort, config.DB, collationEnv) if err != nil { return fmt.Errorf("failed to parse --tablet-path: %w", err) } @@ -147,9 +167,11 @@ func run(cmd *cobra.Command, args []string) error { MysqlDaemon: mysqld, DBConfigs: config.DB.Clone(), QueryServiceControl: qsc, - UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine()), - VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler()), - VDiffEngine: vdiff.NewEngine(config, ts, tablet), + UpdateStream: binlog.NewUpdateStream(ts, tablet.Keyspace, tabletAlias.Cell, qsc.SchemaEngine(), parser), + VREngine: vreplication.NewEngine(config, ts, tabletAlias.Cell, mysqld, qsc.LagThrottler(), collationEnv, parser), + VDiffEngine: vdiff.NewEngine(ts, tablet, collationEnv, parser), + CollationEnv: collationEnv, + SQLParser: parser, } if err := tm.Start(tablet, config); err != 
nil { ts.Close() @@ -169,7 +191,7 @@ func run(cmd *cobra.Command, args []string) error { return nil } -func initConfig(tabletAlias *topodatapb.TabletAlias) (*tabletenv.TabletConfig, *mysqlctl.Mycnf, error) { +func initConfig(tabletAlias *topodatapb.TabletAlias, collationEnv *collations.Environment) (*tabletenv.TabletConfig, *mysqlctl.Mycnf, error) { tabletenv.Init() // Load current config after tabletenv.Init, because it changes it. config := tabletenv.NewCurrentConfig() @@ -211,9 +233,9 @@ func initConfig(tabletAlias *topodatapb.TabletAlias) (*tabletenv.TabletConfig, * // If connection parameters were specified, socketFile will be empty. // Otherwise, the socketFile (read from mycnf) will be used to initialize // dbconfigs. - config.DB.InitWithSocket(socketFile) + config.DB.InitWithSocket(socketFile, collationEnv) for _, cfg := range config.ExternalConnections { - cfg.InitWithSocket("") + cfg.InitWithSocket("", collationEnv) } return config, mycnf, nil } @@ -237,7 +259,7 @@ func extractOnlineDDL() error { return nil } -func createTabletServer(ctx context.Context, config *tabletenv.TabletConfig, ts *topo.Server, tabletAlias *topodatapb.TabletAlias) (*tabletserver.TabletServer, error) { +func createTabletServer(ctx context.Context, config *tabletenv.TabletConfig, ts *topo.Server, tabletAlias *topodatapb.TabletAlias, collationEnv *collations.Environment, parser *sqlparser.Parser) (*tabletserver.TabletServer, error) { if tableACLConfig != "" { // To override default simpleacl, other ACL plugins must set themselves to be default ACL factory tableacl.Register("simpleacl", &simpleacl.Factory{}) @@ -246,7 +268,7 @@ func createTabletServer(ctx context.Context, config *tabletenv.TabletConfig, ts } // creates and registers the query service - qsc := tabletserver.NewTabletServer(ctx, "", config, ts, tabletAlias) + qsc := tabletserver.NewTabletServer(ctx, "", config, ts, tabletAlias, collationEnv, parser) servenv.OnRun(func() { qsc.Register() addStatusParts(qsc) diff --git 
a/go/errors/errors.go b/go/errors/errors.go index d3349d320ed..22a3ba937e9 100644 --- a/go/errors/errors.go +++ b/go/errors/errors.go @@ -32,7 +32,7 @@ func Unwrap(err error) []error { return nil } -// Unwrap unwraps an error created by errors.Join() in Go 1.20, into its components, recursively +// UnwrapAll unwraps an error created by errors.Join() in Go 1.20, into its components, recursively func UnwrapAll(err error) (errs []error) { if err == nil { return nil @@ -46,7 +46,7 @@ func UnwrapAll(err error) (errs []error) { return []error{err} } -// Unwrap unwraps an error created by errors.Join() in Go 1.20, into its components, recursively, +// UnwrapFirst unwraps an error created by errors.Join() in Go 1.20, into its components, recursively, // and returns one (the first) unwrapped error func UnwrapFirst(err error) error { if err == nil { diff --git a/go/flags/endtoend/mysqlctl.txt b/go/flags/endtoend/mysqlctl.txt index 518c3f49d4a..d729a44826d 100644 --- a/go/flags/endtoend/mysqlctl.txt +++ b/go/flags/endtoend/mysqlctl.txt @@ -81,6 +81,7 @@ Flags: --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. 
(default 10s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) diff --git a/go/flags/endtoend/mysqlctld.txt b/go/flags/endtoend/mysqlctld.txt index ccb89f08bf5..a5bec13f09a 100644 --- a/go/flags/endtoend/mysqlctld.txt +++ b/go/flags/endtoend/mysqlctld.txt @@ -106,6 +106,7 @@ Flags: --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) --port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --replication_connect_retry duration how long to wait in between replica reconnect attempts. Only precise to the second. (default 10s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) diff --git a/go/flags/endtoend/topo2topo.txt b/go/flags/endtoend/topo2topo.txt index a96d3cfda61..49f24583668 100644 --- a/go/flags/endtoend/topo2topo.txt +++ b/go/flags/endtoend/topo2topo.txt @@ -33,6 +33,7 @@ Flags: --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) --logtostderr log to standard error instead of files --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) diff --git a/go/flags/endtoend/vtaclcheck.txt b/go/flags/endtoend/vtaclcheck.txt index a7ba7604f46..29bdc63f835 100644 
--- a/go/flags/endtoend/vtaclcheck.txt +++ b/go/flags/endtoend/vtaclcheck.txt @@ -21,6 +21,7 @@ Flags: --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) --logtostderr log to standard error instead of files --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --static-auth-file string The path of the auth_server_static JSON file to check diff --git a/go/flags/endtoend/vtbackup.txt b/go/flags/endtoend/vtbackup.txt index 5fedbde91c6..6b177c4c342 100644 --- a/go/flags/endtoend/vtbackup.txt +++ b/go/flags/endtoend/vtbackup.txt @@ -182,6 +182,7 @@ Flags: --opentsdb_uri string URI of opentsdb /api/put method --port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --remote_operation_timeout duration time to wait for a remote operation (default 15s) --restart_before_backup Perform a mysqld clean/full restart after applying binlogs, but before taking the backup. Only makes sense to work around xtrabackup bugs. diff --git a/go/flags/endtoend/vtbench.txt b/go/flags/endtoend/vtbench.txt index 22066778fe2..4375f4ecdef 100644 --- a/go/flags/endtoend/vtbench.txt +++ b/go/flags/endtoend/vtbench.txt @@ -72,6 +72,7 @@ Flags: --mysql_server_version string MySQL server version to advertise. 
(default "8.0.30-Vitess") --port int VTGate port --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --protocol string Client protocol, either mysql (default), grpc-vtgate, or grpc-vttablet (default "mysql") --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) diff --git a/go/flags/endtoend/vtclient.txt b/go/flags/endtoend/vtclient.txt index e82b2807603..daab2b5683f 100644 --- a/go/flags/endtoend/vtclient.txt +++ b/go/flags/endtoend/vtclient.txt @@ -38,6 +38,7 @@ Flags: --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") --parallel int DMLs only: Number of threads executing the same query in parallel. Useful for simple load testing. (default 1) --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --qps int queries per second to throttle each thread at. --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) diff --git a/go/flags/endtoend/vtcombo.txt b/go/flags/endtoend/vtcombo.txt index 7529bfe2f18..72771f18973 100644 --- a/go/flags/endtoend/vtcombo.txt +++ b/go/flags/endtoend/vtcombo.txt @@ -256,6 +256,7 @@ Flags: --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) --port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --proto_topo vttest.TopoData vttest proto definition of the topology, encoded in compact text format. See vttest.proto for more information. 
--proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket --proxy_tablets Setting this true will make vtctld proxy the tablet status instead of redirecting to them diff --git a/go/flags/endtoend/vtctlclient.txt b/go/flags/endtoend/vtctlclient.txt index 4a4e44763f1..61185dec18b 100644 --- a/go/flags/endtoend/vtctlclient.txt +++ b/go/flags/endtoend/vtctlclient.txt @@ -30,6 +30,7 @@ Usage of vtctlclient: --logbuflevel int Buffer log messages logged at this level or lower (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms. --logtostderr log to standard error instead of files --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --server string server to use for connection diff --git a/go/flags/endtoend/vtctld.txt b/go/flags/endtoend/vtctld.txt index 82895637c69..8939ceb0b35 100644 --- a/go/flags/endtoend/vtctld.txt +++ b/go/flags/endtoend/vtctld.txt @@ -94,12 +94,14 @@ Flags: --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) --logtostderr log to standard error instead of files --max-stack-size int configure the maximum stack size in bytes (default 67108864) + --mysql_server_version string MySQL server version to advertise. (default "8.0.30-Vitess") --onclose_timeout duration wait no more than this for OnClose handlers before stopping (default 10s) --onterm_timeout duration wait no more than this for OnTermSync handlers before stopping (default 10s) --opentsdb_uri string URI of opentsdb /api/put method --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. 
--port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --proxy_tablets Setting this true will make vtctld proxy the tablet status instead of redirecting to them --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --remote_operation_timeout duration time to wait for a remote operation (default 15s) diff --git a/go/flags/endtoend/vtexplain.txt b/go/flags/endtoend/vtexplain.txt index 748856a97a6..bdbe1da8768 100644 --- a/go/flags/endtoend/vtexplain.txt +++ b/go/flags/endtoend/vtexplain.txt @@ -64,6 +64,7 @@ Flags: --output-mode string Output in human-friendly text or json (default "text") --planner-version string Sets the default planner to use. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --replication-mode string The replication mode to simulate -- must be set to either ROW or STATEMENT (default "ROW") --schema string The SQL table schema diff --git a/go/flags/endtoend/vtgate.txt b/go/flags/endtoend/vtgate.txt index 55974504d5e..0d296c35e7c 100644 --- a/go/flags/endtoend/vtgate.txt +++ b/go/flags/endtoend/vtgate.txt @@ -163,6 +163,7 @@ Flags: --planner-version string Sets the default planner to use when the session has not changed it. Valid values are: Gen4, Gen4Greedy, Gen4Left2Right --port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --proxy_protocol Enable HAProxy PROXY protocol on MySQL listener socket --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --query-timeout int Sets the default query timeout (in ms). 
Can be overridden by session variable (query_timeout) or comment directive (QUERY_TIMEOUT_MS) diff --git a/go/flags/endtoend/vtgateclienttest.txt b/go/flags/endtoend/vtgateclienttest.txt index 6a05d975466..32e892805e4 100644 --- a/go/flags/endtoend/vtgateclienttest.txt +++ b/go/flags/endtoend/vtgateclienttest.txt @@ -56,6 +56,7 @@ Flags: --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. --port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --security_policy string the name of a registered security policy to use for controlling access to URLs - empty means allow all for anyone (built-in policies: deny-all, read-only) --service_map strings comma separated list of services to enable (or disable if prefixed with '-') Example: grpc-queryservice diff --git a/go/flags/endtoend/vtorc.txt b/go/flags/endtoend/vtorc.txt index 8ea30e2ff10..0460153301d 100644 --- a/go/flags/endtoend/vtorc.txt +++ b/go/flags/endtoend/vtorc.txt @@ -61,6 +61,7 @@ Flags: --pid_file string If set, the process will write its pid to the named file, and delete it on graceful shutdown. 
--port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --prevent-cross-cell-failover Prevent VTOrc from promoting a primary in a different cell than the current primary in case of a failover --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --reasonable-replication-lag duration Maximum replication lag on replicas which is deemed to be acceptable (default 10s) diff --git a/go/flags/endtoend/vttablet.txt b/go/flags/endtoend/vttablet.txt index 59016b972ba..e7432a9151b 100644 --- a/go/flags/endtoend/vttablet.txt +++ b/go/flags/endtoend/vttablet.txt @@ -254,6 +254,7 @@ Flags: --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) --port int port for the server --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --pt-osc-path string override default pt-online-schema-change binary full path --publish_retry_interval duration how long vttablet waits to retry publishing the tablet record (default 30s) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) diff --git a/go/flags/endtoend/vttestserver.txt b/go/flags/endtoend/vttestserver.txt index aac0d1e5286..967fcd949c2 100644 --- a/go/flags/endtoend/vttestserver.txt +++ b/go/flags/endtoend/vttestserver.txt @@ -99,6 +99,7 @@ Flags: --pool_hostname_resolve_interval duration if set force an update to all hostnames and reconnect if changed, defaults to 0 (disabled) --port int Port to use for vtcombo. If this is 0, a random port will be chosen. --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --proto_topo string Define the fake cluster topology as a compact text format encoded vttest proto. See vttest.proto for more information. 
--purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --queryserver-config-transaction-timeout float query server transaction timeout (in seconds), a transaction will be killed if it takes longer than this value diff --git a/go/flags/endtoend/zkctl.txt b/go/flags/endtoend/zkctl.txt index 727c0f28191..18c47d7c2d2 100644 --- a/go/flags/endtoend/zkctl.txt +++ b/go/flags/endtoend/zkctl.txt @@ -28,6 +28,7 @@ Flags: --log_rotate_max_size uint size in bytes at which logs are rotated (glog.MaxSize) (default 1887436800) --logtostderr log to standard error instead of files --pprof strings enable profiling + --pprof-http enable pprof http endpoints (default true) --purge_logs_interval duration how often try to remove old logs (default 1h0m0s) --stderrthreshold severityFlag logs at or above this threshold go to stderr (default 1) --v Level log level for V logs diff --git a/go/flagutil/deprecated_float64_seconds.go b/go/flagutil/deprecated_float64_seconds.go deleted file mode 100644 index d9afb11aaa2..00000000000 --- a/go/flagutil/deprecated_float64_seconds.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright 2023 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package flagutil - -import ( - "strconv" - "time" - - "vitess.io/vitess/go/vt/log" -) - -type DeprecatedFloat64Seconds struct { - name string - val time.Duration -} - -var _ Value[time.Duration] = (*DeprecatedFloat64Seconds)(nil) - -func NewDeprecatedFloat64Seconds(name string, defVal time.Duration) DeprecatedFloat64Seconds { - return DeprecatedFloat64Seconds{ - name: name, - val: defVal, - } -} - -func (f *DeprecatedFloat64Seconds) String() string { return f.val.String() } -func (f *DeprecatedFloat64Seconds) Type() string { return "duration" } - -func (f *DeprecatedFloat64Seconds) Set(arg string) error { - v, err := time.ParseDuration(arg) - if err != nil { - log.Warningf("failed to parse %s as duration (err: %v); falling back to parsing to %s as seconds. this is deprecated and will be removed in a future release", f.name, err, f.val) - - n, err := strconv.ParseFloat(arg, 64) - if err != nil { - return err - } - - v = time.Duration(n * float64(time.Second)) - } - - f.val = v - return nil -} - -func (f DeprecatedFloat64Seconds) Clone() DeprecatedFloat64Seconds { - return DeprecatedFloat64Seconds{ - name: f.name, - val: f.val, - } -} - -func (f DeprecatedFloat64Seconds) Name() string { return f.name } -func (f DeprecatedFloat64Seconds) Get() time.Duration { return f.val } - -func (f *DeprecatedFloat64Seconds) UnmarshalJSON(data []byte) error { - return f.Set(string(data)) -} diff --git a/go/hack/runtime.go b/go/hack/runtime.go index 5f6b946e33d..83428504818 100644 --- a/go/hack/runtime.go +++ b/go/hack/runtime.go @@ -22,21 +22,10 @@ import ( "unsafe" ) -//go:noescape -//go:linkname memhash runtime.memhash -func memhash(p unsafe.Pointer, h, s uintptr) uintptr - //go:noescape //go:linkname strhash runtime.strhash func strhash(p unsafe.Pointer, h uintptr) uintptr -// RuntimeMemhash provides access to the Go runtime's default hash function for arbitrary bytes. 
-// This is an optimal hash function which takes an input seed and is potentially implemented in hardware -// for most architectures. This is the same hash function that the language's `map` uses. -func RuntimeMemhash(b []byte, seed uint64) uint64 { - return uint64(memhash(unsafe.Pointer(unsafe.SliceData(b)), uintptr(seed), uintptr(len(b)))) -} - // RuntimeStrhash provides access to the Go runtime's default hash function for strings. // This is an optimal hash function which takes an input seed and is potentially implemented in hardware // for most architectures. This is the same hash function that the language's `map` uses. diff --git a/go/mysql/auth_server_clientcert_test.go b/go/mysql/auth_server_clientcert_test.go index 3314116e953..ca32bbfc7ee 100644 --- a/go/mysql/auth_server_clientcert_test.go +++ b/go/mysql/auth_server_clientcert_test.go @@ -39,7 +39,7 @@ func TestValidCert(t *testing.T) { authServer := newAuthServerClientCert(string(MysqlClearPassword)) // Create the listener, so we can get its host. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() @@ -108,7 +108,7 @@ func TestNoCert(t *testing.T) { authServer := newAuthServerClientCert(string(MysqlClearPassword)) // Create the listener, so we can get its host. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err, "NewListener failed: %v", err) defer l.Close() host := l.Addr().(*net.TCPAddr).IP.String() diff --git a/go/mysql/client.go b/go/mysql/client.go index db1fd0cb68f..16740bf38db 100644 --- a/go/mysql/client.go +++ b/go/mysql/client.go @@ -106,7 +106,7 @@ func Connect(ctx context.Context, params *ConnParams) (*Conn, error) { } // Send the connection back, so the other side can close it. - c := newConn(conn, params.FlushDelay) + c := newConn(conn, params.FlushDelay, params.TruncateErrLen) status <- connectResult{ c: c, } @@ -229,11 +229,6 @@ func (c *Conn) clientHandshake(params *ConnParams) error { c.Capabilities = capabilities & (CapabilityClientDeprecateEOF) } - charset, err := collations.Local().ParseConnectionCharset(params.Charset) - if err != nil { - return err - } - // Handle switch to SSL if necessary. if params.SslEnabled() { // If client asked for SSL, but server doesn't support it, @@ -270,7 +265,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { } // Send the SSLRequest packet. - if err := c.writeSSLRequest(capabilities, charset, params); err != nil { + if err := c.writeSSLRequest(capabilities, uint8(params.Charset), params); err != nil { return err } @@ -302,7 +297,7 @@ func (c *Conn) clientHandshake(params *ConnParams) error { // Build and send our handshake response 41. // Note this one will never have SSL flag on. 
- if err := c.writeHandshakeResponse41(capabilities, scrambledPassword, charset, params); err != nil { + if err := c.writeHandshakeResponse41(capabilities, scrambledPassword, uint8(params.Charset), params); err != nil { return err } diff --git a/go/mysql/client_test.go b/go/mysql/client_test.go index 057a8584679..5e9a634c13f 100644 --- a/go/mysql/client_test.go +++ b/go/mysql/client_test.go @@ -151,7 +151,7 @@ func TestTLSClientDisabled(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() @@ -223,7 +223,7 @@ func TestTLSClientPreferredDefault(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() @@ -296,7 +296,7 @@ func TestTLSClientRequired(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. 
- l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() @@ -343,7 +343,7 @@ func TestTLSClientVerifyCA(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() @@ -426,7 +426,7 @@ func TestTLSClientVerifyIdentity(t *testing.T) { // Below, we are enabling --ssl-verify-server-cert, which adds // a check that the common name of the certificate matches the // server host name we connect to. - l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion) + l, err := NewListener("tcp", "127.0.0.1:", authServer, th, 0, 0, false, false, 0, 0, mysqlVersion, 0) require.NoError(t, err) defer l.Close() diff --git a/go/mysql/collations/cached_size.go b/go/mysql/collations/cached_size.go new file mode 100644 index 00000000000..630bf41230a --- /dev/null +++ b/go/mysql/collations/cached_size.go @@ -0,0 +1,111 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +// Code generated by Sizegen. DO NOT EDIT. + +package collations + +import ( + "math" + "reflect" + "unsafe" + + hack "vitess.io/vitess/go/hack" +) + +//go:nocheckptr +func (cached *Environment) CachedSize(alloc bool) int64 { + if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field byName map[string]vitess.io/vitess/go/mysql/collations.ID + if cached.byName != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.byName) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += hack.RuntimeAllocSize(int64(numOldBuckets * 160)) + if len(cached.byName) > 0 || numBuckets > 1 { + size += hack.RuntimeAllocSize(int64(numBuckets * 160)) + } + for k := range cached.byName { + size += hack.RuntimeAllocSize(int64(len(k))) + } + } + // field byCharset map[string]*vitess.io/vitess/go/mysql/collations.colldefaults + if cached.byCharset != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.byCharset) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += hack.RuntimeAllocSize(int64(numOldBuckets * 208)) + if len(cached.byCharset) > 0 || numBuckets > 1 { + size += hack.RuntimeAllocSize(int64(numBuckets * 208)) + } + for k, v := range cached.byCharset { + size += hack.RuntimeAllocSize(int64(len(k))) + if v != nil { + size += hack.RuntimeAllocSize(int64(4)) + } + } + } + // field byCharsetName map[vitess.io/vitess/go/mysql/collations.ID]string + if cached.byCharsetName != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.byCharsetName) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += 
hack.RuntimeAllocSize(int64(numOldBuckets * 160)) + if len(cached.byCharsetName) > 0 || numBuckets > 1 { + size += hack.RuntimeAllocSize(int64(numBuckets * 160)) + } + for _, v := range cached.byCharsetName { + size += hack.RuntimeAllocSize(int64(len(v))) + } + } + // field unsupported map[string]vitess.io/vitess/go/mysql/collations.ID + if cached.unsupported != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.unsupported) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += hack.RuntimeAllocSize(int64(numOldBuckets * 160)) + if len(cached.unsupported) > 0 || numBuckets > 1 { + size += hack.RuntimeAllocSize(int64(numBuckets * 160)) + } + for k := range cached.unsupported { + size += hack.RuntimeAllocSize(int64(len(k))) + } + } + // field byID map[vitess.io/vitess/go/mysql/collations.ID]string + if cached.byID != nil { + size += int64(48) + hmap := reflect.ValueOf(cached.byID) + numBuckets := int(math.Pow(2, float64((*(*uint8)(unsafe.Pointer(hmap.Pointer() + uintptr(9))))))) + numOldBuckets := (*(*uint16)(unsafe.Pointer(hmap.Pointer() + uintptr(10)))) + size += hack.RuntimeAllocSize(int64(numOldBuckets * 160)) + if len(cached.byID) > 0 || numBuckets > 1 { + size += hack.RuntimeAllocSize(int64(numBuckets * 160)) + } + for _, v := range cached.byID { + size += hack.RuntimeAllocSize(int64(len(v))) + } + } + return size +} diff --git a/go/mysql/collations/env.go b/go/mysql/collations/env.go index 91fc2a8bd8c..9fe87230649 100644 --- a/go/mysql/collations/env.go +++ b/go/mysql/collations/env.go @@ -248,10 +248,10 @@ func (env *Environment) CollationAlias(collation string) (string, bool) { // to a Collation ID, with the exception that it can only fit in 1 byte. // For MySQL 8.0+ environments, the default charset is `utf8mb4_0900_ai_ci`. // For older MySQL environments, the default charset is `utf8mb4_general_ci`. 
-func (env *Environment) DefaultConnectionCharset() uint8 { +func (env *Environment) DefaultConnectionCharset() ID { switch env.version { case collverMySQL8: - return uint8(CollationUtf8mb4ID) + return CollationUtf8mb4ID default: return 45 } @@ -267,7 +267,7 @@ func (env *Environment) DefaultConnectionCharset() uint8 { // handshake. // - empty, in which case the default connection charset for this MySQL version // is returned. -func (env *Environment) ParseConnectionCharset(csname string) (uint8, error) { +func (env *Environment) ParseConnectionCharset(csname string) (ID, error) { if csname == "" { return env.DefaultConnectionCharset(), nil } @@ -282,7 +282,7 @@ func (env *Environment) ParseConnectionCharset(csname string) (uint8, error) { if collid == 0 || collid > 255 { return 0, fmt.Errorf("unsupported connection charset: %q", csname) } - return uint8(collid), nil + return collid, nil } func (env *Environment) AllCollationIDs() []ID { diff --git a/go/mysql/collations/integration/charset_test.go b/go/mysql/collations/integration/charset_test.go index 8a4d12a0e4d..b1b747e768b 100644 --- a/go/mysql/collations/integration/charset_test.go +++ b/go/mysql/collations/integration/charset_test.go @@ -45,7 +45,7 @@ func TestLocalEncodings(t *testing.T) { defer conn.Close() for _, tc := range cases { - local := collations.Local().LookupByName(tc.collation) + local := collations.MySQL8().LookupByName(tc.collation) remote := remote.NewCollation(conn, tc.collation) verifyTranscoding(t, colldata.Lookup(local), remote, tc.input) } diff --git a/go/mysql/collations/integration/coercion_test.go b/go/mysql/collations/integration/coercion_test.go index dad55bcafad..c194b48c071 100644 --- a/go/mysql/collations/integration/coercion_test.go +++ b/go/mysql/collations/integration/coercion_test.go @@ -54,7 +54,7 @@ type testConcat struct { } func (tc *testConcat) Expression() string { - env := collations.Local() + env := collations.MySQL8() return fmt.Sprintf("CONCAT((_%s X'%x' COLLATE 
%q), (_%s X'%x' COLLATE %q))", colldata.Lookup(tc.left.Collation).Charset().Name(), tc.left.Text, env.LookupName(tc.left.Collation), colldata.Lookup(tc.right.Collation).Charset().Name(), tc.right.Text, env.LookupName(tc.right.Collation), @@ -63,7 +63,7 @@ func (tc *testConcat) Expression() string { func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local collations.TypedCollation, coercion1, coercion2 colldata.Coercion) { localCollation := colldata.Lookup(local.Collation) - remoteName := collations.Local().LookupName(remote.Collation) + remoteName := collations.MySQL8().LookupName(remote.Collation) assert.Equal(t, remoteName, localCollation.Name(), "bad collation resolved: local is %s, remote is %s", localCollation.Name(), remoteName) assert.Equal(t, remote.Coercibility, local.Coercibility, "bad coercibility resolved: local is %d, remote is %d", local.Coercibility, remote.Coercibility) @@ -85,8 +85,8 @@ func (tc *testConcat) Test(t *testing.T, remote *RemoteCoercionResult, local col rEBytes, err := remote.Expr.ToBytes() require.NoError(t, err) - assert.True(t, bytes.Equal(concat.Bytes(), rEBytes), "failed to concatenate text;\n\tCONCAT(%v COLLATE %s, %v COLLATE %s) = \n\tCONCAT(%v, %v) COLLATE %s = \n\t\t%v\n\n\texpected: %v", tc.left.Text, collations.Local().LookupName(tc.left.Collation), - tc.right.Text, collations.Local().LookupName(tc.right.Collation), leftText, rightText, localCollation.Name(), + assert.True(t, bytes.Equal(concat.Bytes(), rEBytes), "failed to concatenate text;\n\tCONCAT(%v COLLATE %s, %v COLLATE %s) = \n\tCONCAT(%v, %v) COLLATE %s = \n\t\t%v\n\n\texpected: %v", tc.left.Text, collations.MySQL8().LookupName(tc.left.Collation), + tc.right.Text, collations.MySQL8().LookupName(tc.right.Collation), leftText, rightText, localCollation.Name(), concat.Bytes(), rEBytes) } @@ -96,7 +96,7 @@ type testComparison struct { } func (tc *testComparison) Expression() string { - env := collations.Local() + env := collations.MySQL8() return 
fmt.Sprintf("(_%s X'%x' COLLATE %q) = (_%s X'%x' COLLATE %q)", env.LookupCharsetName(tc.left.Collation), tc.left.Text, env.LookupName(tc.left.Collation), env.LookupCharsetName(tc.right.Collation), tc.right.Text, env.LookupName(tc.right.Collation), @@ -135,7 +135,7 @@ func TestComparisonSemantics(t *testing.T) { t.Skipf("The behavior of Coercion Semantics is not correct before 8.0.31") } - for _, coll := range colldata.All(collations.Local()) { + for _, coll := range colldata.All(collations.MySQL8()) { text := verifyTranscoding(t, coll, remote.NewCollation(conn, coll.Name()), []byte(BaseString)) testInputs = append(testInputs, &TextWithCollation{Text: text, Collation: coll.ID()}) } @@ -175,7 +175,7 @@ func TestComparisonSemantics(t *testing.T) { Coercibility: 0, Repertoire: collations.RepertoireASCII, } - resultLocal, coercionLocal1, coercionLocal2, errLocal := colldata.Merge(collations.Local(), left, right, + resultLocal, coercionLocal1, coercionLocal2, errLocal := colldata.Merge(collations.MySQL8(), left, right, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, @@ -194,7 +194,7 @@ func TestComparisonSemantics(t *testing.T) { query := fmt.Sprintf("SELECT CAST((%s) AS BINARY), COLLATION(%s), COERCIBILITY(%s)", expr, expr, expr) resultRemote, errRemote := conn.ExecuteFetch(query, 1, false) - env := collations.Local() + env := collations.MySQL8() if errRemote != nil { require.True(t, strings.Contains(errRemote.Error(), "Illegal mix of collations"), "query %s failed: %v", query, errRemote) @@ -212,7 +212,7 @@ func TestComparisonSemantics(t *testing.T) { continue } - remoteCollation := collations.Local().LookupByName(resultRemote.Rows[0][1].ToString()) + remoteCollation := collations.MySQL8().LookupByName(resultRemote.Rows[0][1].ToString()) remoteCI, _ := resultRemote.Rows[0][2].ToInt64() remoteTest.Test(t, &RemoteCoercionResult{ Expr: resultRemote.Rows[0][0], diff --git a/go/mysql/collations/integration/collations_test.go 
b/go/mysql/collations/integration/collations_test.go index e5362608e75..519f4560faf 100644 --- a/go/mysql/collations/integration/collations_test.go +++ b/go/mysql/collations/integration/collations_test.go @@ -38,7 +38,6 @@ import ( "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/remote" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sqlparser" ) @@ -46,9 +45,7 @@ var collationEnv *collations.Environment func init() { // We require MySQL 8.0 collations for the comparisons in the tests - mySQLVersion := "8.0.0" - servenv.SetMySQLServerVersionForTest(mySQLVersion) - collationEnv = collations.NewEnvironment(mySQLVersion) + collationEnv = collations.NewEnvironment("8.0.30") } func getSQLQueries(t *testing.T, testfile string) []string { @@ -63,7 +60,7 @@ func getSQLQueries(t *testing.T, testfile string) []string { addchunk := func() { if curchunk.Len() > 0 { - stmts, err := sqlparser.SplitStatementToPieces(curchunk.String()) + stmts, err := sqlparser.NewTestParser().SplitStatementToPieces(curchunk.String()) if err != nil { t.Fatal(err) } diff --git a/go/mysql/collations/integration/helpers_test.go b/go/mysql/collations/integration/helpers_test.go index d436280f04b..a5d2bb0cc36 100644 --- a/go/mysql/collations/integration/helpers_test.go +++ b/go/mysql/collations/integration/helpers_test.go @@ -52,7 +52,7 @@ func testRemoteWeights(t *testing.T, golden io.Writer, cases []testweight) { for _, tc := range cases { t.Run(tc.collation, func(t *testing.T) { - local := collations.Local().LookupByName(tc.collation) + local := collations.MySQL8().LookupByName(tc.collation) remote := remote.NewCollation(conn, tc.collation) localResult := colldata.Lookup(local).WeightString(nil, tc.input, 0) remoteResult := remote.WeightString(nil, tc.input, 0) @@ -85,7 +85,7 @@ func testRemoteComparison(t *testing.T, golden io.Writer, cases []testcmp) { for _, tc := range cases { t.Run(tc.collation, func(t *testing.T) { - local 
:= collations.Local().LookupByName(tc.collation) + local := collations.MySQL8().LookupByName(tc.collation) remote := remote.NewCollation(conn, tc.collation) localResult := normalizecmp(colldata.Lookup(local).Collate(tc.left, tc.right, false)) remoteResult := remote.Collate(tc.left, tc.right, false) diff --git a/go/mysql/collations/integration/weight_string_test.go b/go/mysql/collations/integration/weight_string_test.go index 666856ca38b..ad4ad4270fc 100644 --- a/go/mysql/collations/integration/weight_string_test.go +++ b/go/mysql/collations/integration/weight_string_test.go @@ -60,7 +60,7 @@ func TestWeightStringsComprehensive(t *testing.T) { conn := mysqlconn(t) defer conn.Close() - allCollations := colldata.All(collations.Local()) + allCollations := colldata.All(collations.MySQL8()) sort.Slice(allCollations, func(i, j int) bool { return allCollations[i].ID() < allCollations[j].ID() }) @@ -104,7 +104,7 @@ func TestCJKWeightStrings(t *testing.T) { conn := mysqlconn(t) defer conn.Close() - allCollations := colldata.All(collations.Local()) + allCollations := colldata.All(collations.MySQL8()) testdata, _ := filepath.Glob("../internal/charset/testdata/*.txt") for _, testfile := range testdata { cs := filepath.Base(testfile) diff --git a/go/mysql/collations/integration/wildcard_test.go b/go/mysql/collations/integration/wildcard_test.go index 6475a35dd21..6a0271218dc 100644 --- a/go/mysql/collations/integration/wildcard_test.go +++ b/go/mysql/collations/integration/wildcard_test.go @@ -79,7 +79,7 @@ func TestRemoteWildcardMatches(t *testing.T) { {"Ǎḅeçd", "a%bd"}, } - for _, local := range colldata.All(collations.Local()) { + for _, local := range colldata.All(collations.MySQL8()) { t.Run(local.Name(), func(t *testing.T) { var remote = remote.NewCollation(conn, local.Name()) var err error diff --git a/go/mysql/collations/local.go b/go/mysql/collations/local.go index 3cf81b270c7..090420e07a7 100644 --- a/go/mysql/collations/local.go +++ b/go/mysql/collations/local.go @@ 
-19,37 +19,14 @@ limitations under the License. package collations import ( - "sync" - - "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/vt/servenv" ) -var defaultEnv *Environment -var defaultEnvInit sync.Once - -// Local is the default collation Environment for Vitess. This depends -// on the value of the `mysql_server_version` flag passed to this Vitess process. -func Local() *Environment { - defaultEnvInit.Do(func() { - if !flag.Parsed() { - panic("collations.Local() called too early") - } - defaultEnv = NewEnvironment(servenv.MySQLServerVersion()) - }) - return defaultEnv -} - -// Default returns the default collation for this Vitess process. -// This is based on the local collation environment, which is based on the user's configured -// MySQL version for this Vitess deployment. -func Default() ID { - return ID(Local().DefaultConnectionCharset()) -} - -func DefaultCollationForType(t sqltypes.Type) ID { - return CollationForType(t, Default()) +// MySQL8 is the collation Environment for MySQL 8. This should +// only be used for testing where we know it's safe to use this +// version, and we don't need a specific other version. 
+func MySQL8() *Environment { + return fetchCacheEnvironment(collverMySQL8) } func CollationForType(t sqltypes.Type, fallback ID) ID { diff --git a/go/mysql/collations/tools/maketestdata/maketestdata.go b/go/mysql/collations/tools/maketestdata/maketestdata.go index edad1c840a3..7adee5d5dfd 100644 --- a/go/mysql/collations/tools/maketestdata/maketestdata.go +++ b/go/mysql/collations/tools/maketestdata/maketestdata.go @@ -167,7 +167,7 @@ func main() { fs := pflag.NewFlagSet("maketestdata", pflag.ExitOnError) flag.Parse(fs) - var defaults = collations.Local() + var defaults = collations.MySQL8() var collationsForLanguage = make(map[testutil.Lang][]collations.ID) var allcollations = colldata.All(defaults) for lang := range testutil.KnownLanguages { diff --git a/go/mysql/config/config.go b/go/mysql/config/config.go index 8abf9d7dc71..cc08107f0a3 100644 --- a/go/mysql/config/config.go +++ b/go/mysql/config/config.go @@ -1,3 +1,4 @@ package config const DefaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION" +const DefaultMySQLVersion = "8.0.30" diff --git a/go/mysql/conn.go b/go/mysql/conn.go index 85a8ffd4027..4dcf87c4867 100644 --- a/go/mysql/conn.go +++ b/go/mysql/conn.go @@ -215,10 +215,9 @@ type Conn struct { // this is used to mark the connection to be closed so that the command phase for the connection can be stopped and // the connection gets closed. closing bool -} -// splitStatementFunciton is the function that is used to split the statement in case of a multi-statement query. -var splitStatementFunction = sqlparser.SplitStatementToPieces + truncateErrLen int +} // PrepareData is a buffer used for store prepare statement meta data type PrepareData struct { @@ -249,7 +248,7 @@ var readersPool = sync.Pool{New: func() any { return bufio.NewReaderSize(nil, co // newConn is an internal method to create a Conn. Used by client and server // side for common creation code. 
-func newConn(conn net.Conn, flushDelay time.Duration) *Conn { +func newConn(conn net.Conn, flushDelay time.Duration, truncateErrLen int) *Conn { if flushDelay == 0 { flushDelay = DefaultFlushDelay } @@ -257,6 +256,7 @@ func newConn(conn net.Conn, flushDelay time.Duration) *Conn { conn: conn, bufferedReader: bufio.NewReaderSize(conn, connBufferSize), flushDelay: flushDelay, + truncateErrLen: truncateErrLen, } } @@ -277,11 +277,12 @@ func newServerConn(conn net.Conn, listener *Listener) *Conn { } c := &Conn{ - conn: conn, - listener: listener, - PrepareData: make(map[uint32]*PrepareData), - keepAliveOn: enabledKeepAlive, - flushDelay: listener.flushDelay, + conn: conn, + listener: listener, + PrepareData: make(map[uint32]*PrepareData), + keepAliveOn: enabledKeepAlive, + flushDelay: listener.flushDelay, + truncateErrLen: listener.truncateErrLen, } if listener.connReadBufferSize > 0 { @@ -1235,7 +1236,7 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { var queries []string if c.Capabilities&CapabilityClientMultiStatements != 0 { var err error - queries, err = splitStatementFunction(query) + queries, err = handler.SQLParser().SplitStatementToPieces(query) if err != nil { log.Errorf("Conn %v: Error splitting query: %v", c, err) return c.writeErrorPacketFromErrorAndLog(err) @@ -1248,14 +1249,14 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { queries = []string{query} } - // Popoulate PrepareData + // Populate PrepareData c.StatementID++ prepare := &PrepareData{ StatementID: c.StatementID, PrepareStmt: queries[0], } - statement, err := sqlparser.ParseStrictDDL(query) + statement, err := handler.SQLParser().ParseStrictDDL(query) if err != nil { log.Errorf("Conn %v: Error parsing prepared statement: %v", c, err) if !c.writeErrorPacketFromErrorAndLog(err) { @@ -1363,7 +1364,7 @@ func (c *Conn) handleComQuery(handler Handler, data []byte) (kontinue bool) { var queries []string var err error if 
c.Capabilities&CapabilityClientMultiStatements != 0 { - queries, err = splitStatementFunction(query) + queries, err = handler.SQLParser().SplitStatementToPieces(query) if err != nil { log.Errorf("Conn %v: Error splitting query: %v", c, err) return c.writeErrorPacketFromErrorAndLog(err) diff --git a/go/mysql/conn_fake.go b/go/mysql/conn_fake.go index c20d09a2f6d..7bc4fd5ff61 100644 --- a/go/mysql/conn_fake.go +++ b/go/mysql/conn_fake.go @@ -84,7 +84,7 @@ var _ net.Addr = (*mockAddress)(nil) // GetTestConn returns a conn for testing purpose only. func GetTestConn() *Conn { - return newConn(testConn{}, DefaultFlushDelay) + return newConn(testConn{}, DefaultFlushDelay, 0) } // GetTestServerConn is only meant to be used for testing. diff --git a/go/mysql/conn_flaky_test.go b/go/mysql/conn_flaky_test.go index 0057aff5aa6..da82a577753 100644 --- a/go/mysql/conn_flaky_test.go +++ b/go/mysql/conn_flaky_test.go @@ -31,18 +31,15 @@ import ( "testing" "time" - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/vt/sqlparser" - "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" ) func createSocketPair(t *testing.T) (net.Listener, *Conn, *Conn) { @@ -77,8 +74,8 @@ func createSocketPair(t *testing.T) (net.Listener, *Conn, *Conn) { require.Nil(t, serverErr, "Accept failed: %v", serverErr) // Create a Conn on both sides. 
- cConn := newConn(clientConn, DefaultFlushDelay) - sConn := newConn(serverConn, DefaultFlushDelay) + cConn := newConn(clientConn, DefaultFlushDelay, 0) + sConn := newConn(serverConn, DefaultFlushDelay, 0) sConn.PrepareData = map[uint32]*PrepareData{} return listener, sConn, cConn @@ -878,14 +875,6 @@ func TestMultiStatement(t *testing.T) { func TestMultiStatementOnSplitError(t *testing.T) { listener, sConn, cConn := createSocketPair(t) - // Set the splitStatementFunction to return an error. - splitStatementFunction = func(blob string) (pieces []string, err error) { - return nil, fmt.Errorf("Error in split statements") - } - defer func() { - // Set the splitStatementFunction to the correct function back - splitStatementFunction = sqlparser.SplitStatementToPieces - }() sConn.Capabilities |= CapabilityClientMultiStatements defer func() { listener.Close() @@ -893,7 +882,7 @@ func TestMultiStatementOnSplitError(t *testing.T) { cConn.Close() }() - err := cConn.WriteComQuery("select 1;select 2") + err := cConn.WriteComQuery("broken>'query 1;parse 0 && len(addrs) > 1 { - s := 0 - n := rand.Intn(sum) - for i := range addrs { - s += int(addrs[i].Weight) - if s > n { - if i > 0 { - t := addrs[i] - copy(addrs[1:i+1], addrs[0:i]) - addrs[0] = t - } - break - } - } - sum -= int(addrs[0].Weight) - addrs = addrs[1:] - } -} - -func (addrs byPriorityWeight) sortRfc2782(rand *rand.Rand) { - sort.Sort(addrs) - i := 0 - for j := 1; j < len(addrs); j++ { - if addrs[i].Priority != addrs[j].Priority { - addrs[i:j].shuffleByWeight(rand) - i = j - } - } - addrs[i:].shuffleByWeight(rand) -} - -// SortRfc2782 reorders SRV records as specified in RFC 2782. -func SortRfc2782(srvs []*net.SRV) { - byPriorityWeight(srvs).sortRfc2782(rand.New(rand.NewSource(time.Now().UTC().UnixNano()))) -} - // SplitHostPort is an alternative to net.SplitHostPort that also parses the // integer port. 
In addition, it is more tolerant of improperly escaped IPv6 // addresses, such as "::1:456", which should actually be "[::1]:456". @@ -164,29 +106,6 @@ func FullyQualifiedHostnameOrPanic() string { return hostname } -// ResolveIPv4Addrs resolves the address:port part into IP address:port pairs -func ResolveIPv4Addrs(addr string) ([]string, error) { - host, port, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - ipAddrs, err := net.LookupIP(host) - if err != nil { - return nil, err - } - result := make([]string, 0, len(ipAddrs)) - for _, ipAddr := range ipAddrs { - ipv4 := ipAddr.To4() - if ipv4 != nil { - result = append(result, net.JoinHostPort(ipv4.String(), port)) - } - } - if len(result) == 0 { - return nil, fmt.Errorf("no IPv4addr for name %v", host) - } - return result, nil -} - func dnsLookup(host string) ([]net.IP, error) { addrs, err := net.LookupHost(host) if err != nil { diff --git a/go/netutil/netutil_test.go b/go/netutil/netutil_test.go index b8cfc563acb..c0c0e16cfed 100644 --- a/go/netutil/netutil_test.go +++ b/go/netutil/netutil_test.go @@ -17,70 +17,9 @@ limitations under the License. 
package netutil import ( - "fmt" - "math/rand" - "net" - "reflect" "testing" ) -func checkDistribution(t *testing.T, rand *rand.Rand, data []*net.SRV, margin float64) { - sum := 0 - for _, srv := range data { - sum += int(srv.Weight) - } - - results := make(map[string]int) - - count := 1000 - for j := 0; j < count; j++ { - d := make([]*net.SRV, len(data)) - copy(d, data) - byPriorityWeight(d).shuffleByWeight(rand) - key := d[0].Target - results[key] = results[key] + 1 - } - - actual := results[data[0].Target] - expected := float64(count) * float64(data[0].Weight) / float64(sum) - diff := float64(actual) - expected - t.Logf("actual: %v diff: %v e: %v m: %v", actual, diff, expected, margin) - if diff < 0 { - diff = -diff - } - if diff > (expected * margin) { - t.Errorf("missed target weight: expected %v, %v", expected, actual) - } -} - -func testUniformity(t *testing.T, size int, margin float64) { - data := make([]*net.SRV, size) - for i := 0; i < size; i++ { - data[i] = &net.SRV{Target: fmt.Sprintf("%c", 'a'+i), Weight: 1} - } - checkDistribution(t, rand.New(rand.NewSource(1)), data, margin) -} - -func TestUniformity(t *testing.T) { - testUniformity(t, 2, 0.05) - testUniformity(t, 3, 0.10) - testUniformity(t, 10, 0.20) - testWeighting(t, 0.05) -} - -func testWeighting(t *testing.T, margin float64) { - data := []*net.SRV{ - {Target: "a", Weight: 60}, - {Target: "b", Weight: 30}, - {Target: "c", Weight: 10}, - } - checkDistribution(t, rand.New(rand.NewSource(1)), data, margin) -} - -func TestWeighting(t *testing.T) { - testWeighting(t, 0.05) -} - func TestSplitHostPort(t *testing.T) { type addr struct { host string @@ -133,43 +72,6 @@ func TestJoinHostPort(t *testing.T) { } } -func TestResolveIPv4Addrs(t *testing.T) { - cases := []struct { - address string - expected []string - expectedError bool - }{ - { - address: "localhost:3306", - expected: []string{"127.0.0.1:3306"}, - }, - { - address: "127.0.0.256:3306", - expectedError: true, - }, - { - address: "localhost", 
- expectedError: true, - }, - { - address: "InvalidHost:3306", - expectedError: true, - }, - } - - for _, c := range cases { - t.Run(c.address, func(t *testing.T) { - got, err := ResolveIPv4Addrs(c.address) - if (err != nil) != c.expectedError { - t.Errorf("expected error but got: %v", err) - } - if !reflect.DeepEqual(got, c.expected) { - t.Errorf("expected: %v, got: %v", c.expected, got) - } - }) - } -} - func TestNormalizeIP(t *testing.T) { table := map[string]string{ "1.2.3.4": "1.2.3.4", diff --git a/go/ratelimiter/ratelimiter.go b/go/ratelimiter/ratelimiter.go deleted file mode 100644 index ddadb8659da..00000000000 --- a/go/ratelimiter/ratelimiter.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package ratelimiter implements rate limiting functionality. -package ratelimiter - -import ( - "sync" - "time" -) - -// RateLimiter was inspired by https://github.com/golang/go/wiki/RateLimiting. -// However, the go example is not good for setting high qps limits because -// it will cause the ticker to fire too often. Also, the ticker will continue -// to fire when the system is idle. This new Ratelimiter achieves the same thing, -// but by using just counters with no tickers or channels. -type RateLimiter struct { - maxCount int - interval time.Duration - - mu sync.Mutex - curCount int - lastTime time.Time -} - -// NewRateLimiter creates a new RateLimiter. 
maxCount is the max burst allowed -// while interval specifies the duration for a burst. The effective rate limit is -// equal to maxCount/interval. For example, if you want to a max QPS of 5000, -// and want to limit bursts to no more than 500, you'd specify a maxCount of 500 -// and an interval of 100*time.Millilsecond. -func NewRateLimiter(maxCount int, interval time.Duration) *RateLimiter { - return &RateLimiter{ - maxCount: maxCount, - interval: interval, - } -} - -// Allow returns true if a request is within the rate limit norms. -// Otherwise, it returns false. -func (rl *RateLimiter) Allow() bool { - rl.mu.Lock() - defer rl.mu.Unlock() - if time.Since(rl.lastTime) < rl.interval { - if rl.curCount > 0 { - rl.curCount-- - return true - } - return false - } - rl.curCount = rl.maxCount - 1 - rl.lastTime = time.Now() - return true -} diff --git a/go/ratelimiter/ratelimiter_test.go b/go/ratelimiter/ratelimiter_test.go deleted file mode 100644 index 768584b20f7..00000000000 --- a/go/ratelimiter/ratelimiter_test.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package ratelimiter - -import ( - "testing" - "time" -) - -func TestLimiter1(t *testing.T) { - rl := NewRateLimiter(1, 10*time.Millisecond) - result := rl.Allow() - if !result { - t.Error("Allow: false, want true") - } - result = rl.Allow() - if result { - t.Error("Allow: true, want false") - } - - time.Sleep(11 * time.Millisecond) - result = rl.Allow() - if !result { - t.Error("Allow: false, want true") - } - result = rl.Allow() - if result { - t.Error("Allow: true, want false") - } -} - -func TestLimiter2(t *testing.T) { - rl := NewRateLimiter(2, 10*time.Millisecond) - var result bool - for i := 0; i < 2; i++ { - result = rl.Allow() - if !result { - t.Errorf("Allow(%d): false, want true", i) - } - } - result = rl.Allow() - if result { - t.Error("Allow: true, want false") - } - - time.Sleep(11 * time.Millisecond) - for i := 0; i < 2; i++ { - result = rl.Allow() - if !result { - t.Errorf("Allow(%d): false, want true", i) - } - } - result = rl.Allow() - if result { - t.Error("Allow: true, want false") - } -} diff --git a/go/sqltypes/type.go b/go/sqltypes/type.go index d3436ed8718..964dd6b5d83 100644 --- a/go/sqltypes/type.go +++ b/go/sqltypes/type.go @@ -189,7 +189,7 @@ const ( // If you add to this map, make sure you add a test case // in tabletserver/endtoend. -var mysqlToType = map[int64]querypb.Type{ +var mysqlToType = map[byte]querypb.Type{ 0: Decimal, 1: Int8, 2: Int16, @@ -275,7 +275,7 @@ func modifyType(typ querypb.Type, flags int64) querypb.Type { } // MySQLToType computes the vitess type from mysql type and flags. -func MySQLToType(mysqlType, flags int64) (typ querypb.Type, err error) { +func MySQLToType(mysqlType byte, flags int64) (typ querypb.Type, err error) { result, ok := mysqlToType[mysqlType] if !ok { return 0, fmt.Errorf("unsupported type: %d", mysqlType) @@ -303,7 +303,7 @@ func AreTypesEquivalent(mysqlTypeFromBinlog, mysqlTypeFromSchema querypb.Type) b // typeToMySQL is the reverse of mysqlToType. 
var typeToMySQL = map[querypb.Type]struct { - typ int64 + typ byte flags int64 }{ Int8: {typ: 1}, @@ -342,7 +342,7 @@ var typeToMySQL = map[querypb.Type]struct { } // TypeToMySQL returns the equivalent mysql type and flag for a vitess type. -func TypeToMySQL(typ querypb.Type) (mysqlType, flags int64) { +func TypeToMySQL(typ querypb.Type) (mysqlType byte, flags int64) { val := typeToMySQL[typ] return val.typ, val.flags } diff --git a/go/sqltypes/type_test.go b/go/sqltypes/type_test.go index f223c5811e3..edf340b2abb 100644 --- a/go/sqltypes/type_test.go +++ b/go/sqltypes/type_test.go @@ -285,7 +285,7 @@ func TestTypeToMySQL(t *testing.T) { func TestMySQLToType(t *testing.T) { testcases := []struct { - intype int64 + intype byte inflags int64 outtype querypb.Type }{{ diff --git a/go/streamlog/streamlog.go b/go/streamlog/streamlog.go index 25e60182c4a..6d9f81f98d9 100644 --- a/go/streamlog/streamlog.go +++ b/go/streamlog/streamlog.go @@ -61,18 +61,10 @@ func SetRedactDebugUIQueries(newRedactDebugUIQueries bool) { redactDebugUIQueries = newRedactDebugUIQueries } -func GetQueryLogFilterTag() string { - return queryLogFilterTag -} - func SetQueryLogFilterTag(newQueryLogFilterTag string) { queryLogFilterTag = newQueryLogFilterTag } -func GetQueryLogRowThreshold() uint64 { - return queryLogRowThreshold -} - func SetQueryLogRowThreshold(newQueryLogRowThreshold uint64) { queryLogRowThreshold = newQueryLogRowThreshold } diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go index 3a1390eec0c..a9cc482b9e3 100644 --- a/go/test/endtoend/cluster/cluster_process.go +++ b/go/test/endtoend/cluster/cluster_process.go @@ -765,19 +765,18 @@ func (cluster *LocalProcessCluster) populateVersionInfo() error { return err } +var versionRegex = regexp.MustCompile(`Version: ([0-9]+)\.([0-9]+)\.([0-9]+)`) + func GetMajorVersion(binaryName string) (int, error) { version, err := exec.Command(binaryName, "--version").Output() if err != nil { return 0, 
err } - versionRegex := regexp.MustCompile(`Version: ([0-9]+)\.([0-9]+)\.([0-9]+)`) v := versionRegex.FindStringSubmatch(string(version)) if len(v) != 4 { return 0, fmt.Errorf("could not parse server version from: %s", version) } - if err != nil { - return 0, fmt.Errorf("could not parse server version from: %s", version) - } + return strconv.Atoi(v[1]) } diff --git a/go/test/endtoend/cluster/mysqlctl_process.go b/go/test/endtoend/cluster/mysqlctl_process.go index 79248b6d9a7..cfc4fc28088 100644 --- a/go/test/endtoend/cluster/mysqlctl_process.go +++ b/go/test/endtoend/cluster/mysqlctl_process.go @@ -253,16 +253,6 @@ func (mysqlctl *MysqlctlProcess) CleanupFiles(tabletUID int) { os.RemoveAll(path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", tabletUID))) } -// Connect returns a new connection to the underlying MySQL server -func (mysqlctl *MysqlctlProcess) Connect(ctx context.Context, username string) (*mysql.Conn, error) { - params := mysql.ConnParams{ - Uname: username, - UnixSocket: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", mysqlctl.TabletUID), "/mysql.sock"), - } - - return mysql.Connect(ctx, ¶ms) -} - // MysqlCtlProcessInstanceOptionalInit returns a Mysqlctl handle for mysqlctl process // configured with the given Config. 
func MysqlCtlProcessInstanceOptionalInit(tabletUID int, mySQLPort int, tmpDirectory string, initMySQL bool) (*MysqlctlProcess, error) { diff --git a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go index dd3cb1dbb4c..f289b4d83b2 100644 --- a/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go +++ b/go/test/endtoend/onlineddl/scheduler/onlineddl_scheduler_test.go @@ -127,7 +127,8 @@ deletesAttempts=%d, deletesFailures=%d, deletesNoops=%d, deletes=%d, func parseTableName(t *testing.T, sql string) (tableName string) { // ddlStatement could possibly be composed of multiple DDL statements - tokenizer := sqlparser.NewStringTokenizer(sql) + parser := sqlparser.NewTestParser() + tokenizer := parser.NewStringTokenizer(sql) for { stmt, err := sqlparser.ParseNextStrictDDL(tokenizer) if err != nil && errors.Is(err, io.EOF) { diff --git a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go index 055dc7a1df5..5a5ecf11428 100644 --- a/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go +++ b/go/test/endtoend/schemadiff/vrepl/schemadiff_vrepl_suite_test.go @@ -345,12 +345,12 @@ func ignoreAutoIncrement(t *testing.T, createTable string) string { func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, allowSchemadiffNormalization bool, hints *schemadiff.DiffHints) { // turn the "from" and "to" create statement strings (which we just read via SHOW CREATE TABLE into sqlparser.CreateTable statement) - fromStmt, err := sqlparser.ParseStrictDDL(fromCreateTable) + fromStmt, err := sqlparser.NewTestParser().ParseStrictDDL(fromCreateTable) require.NoError(t, err) fromCreateTableStatement, ok := fromStmt.(*sqlparser.CreateTable) require.True(t, ok) - toStmt, err := sqlparser.ParseStrictDDL(toCreateTable) + toStmt, err := sqlparser.NewTestParser().ParseStrictDDL(toCreateTable) 
require.NoError(t, err) toCreateTableStatement, ok := toStmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -394,7 +394,7 @@ func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, al // structure is identical. And so we accept that there can be a normalization issue. if allowSchemadiffNormalization { { - stmt, err := sqlparser.ParseStrictDDL(toCreateTable) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(toCreateTable) require.NoError(t, err) createTableStatement, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -403,7 +403,7 @@ func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, al toCreateTable = c.Create().CanonicalStatementString() } { - stmt, err := sqlparser.ParseStrictDDL(resultCreateTable) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(resultCreateTable) require.NoError(t, err) createTableStatement, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -418,7 +418,7 @@ func validateDiff(t *testing.T, fromCreateTable string, toCreateTable string, al assert.Equal(t, toCreateTable, resultCreateTable, "mismatched table structure. 
ALTER query was: %s", diffedAlterQuery) // Also, let's see that our diff agrees there's no change: - resultStmt, err := sqlparser.ParseStrictDDL(resultCreateTable) + resultStmt, err := sqlparser.NewTestParser().ParseStrictDDL(resultCreateTable) require.NoError(t, err) resultCreateTableStatement, ok := resultStmt.(*sqlparser.CreateTable) require.True(t, ok) diff --git a/go/test/endtoend/utils/mysql.go b/go/test/endtoend/utils/mysql.go index 888c0cc8959..ca43ff15970 100644 --- a/go/test/endtoend/utils/mysql.go +++ b/go/test/endtoend/utils/mysql.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/sqlparser" @@ -61,7 +63,7 @@ func CreateMysqldAndMycnf(tabletUID uint32, mysqlSocket string, mysqlPort int) ( var cfg dbconfigs.DBConfigs // ensure the DBA username is 'root' instead of the system's default username so that mysqladmin can shutdown cfg.Dba.User = "root" - cfg.InitWithSocket(mycnf.SocketFile) + cfg.InitWithSocket(mycnf.SocketFile, collations.MySQL8()) return mysqlctl.NewMysqld(&cfg), mycnf, nil } @@ -193,7 +195,7 @@ func compareVitessAndMySQLResults(t *testing.T, query string, vtConn *mysql.Conn } } } - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { t.Error(err) return err diff --git a/go/test/endtoend/vreplication/config_test.go b/go/test/endtoend/vreplication/config_test.go index 0e430548a13..62f20f36e80 100644 --- a/go/test/endtoend/vreplication/config_test.go +++ b/go/test/endtoend/vreplication/config_test.go @@ -60,6 +60,7 @@ create table geom_tbl (id int, g geometry, p point, ls linestring, pg polygon, m create table ` + "`blüb_tbl`" + ` (id int, val1 varchar(20), ` + "`blöb1`" + ` blob, val2 varbinary(20), ` + "`bl@b2`" + ` longblob, txt1 text, blb3 tinyblob, txt2 longtext, blb4 mediumblob, primary key(id)); create table reftable (id int, val1 
varchar(20), primary key(id), key(val1)); create table loadtest (id int, name varchar(256), primary key(id), key(name)); +create table nopk (name varchar(128), age int unsigned); ` // These should always be ignored in vreplication internalSchema = ` @@ -94,6 +95,7 @@ create table loadtest (id int, name varchar(256), primary key(id), key(name)); "db_order_test": {}, "vdiff_order": {}, "datze": {}, + "nopk": {}, "reftable": { "type": "reference" } @@ -216,6 +218,14 @@ create table loadtest (id int, name varchar(256), primary key(id), key(name)); } ] }, + "nopk": { + "column_vindexes": [ + { + "columns": ["name"], + "name": "unicode_loose_md5" + } + ] + }, "reftable": { "type": "reference" } diff --git a/go/test/endtoend/vreplication/helper_test.go b/go/test/endtoend/vreplication/helper_test.go index 07c12caf194..fc7d66bc732 100644 --- a/go/test/endtoend/vreplication/helper_test.go +++ b/go/test/endtoend/vreplication/helper_test.go @@ -428,7 +428,7 @@ func confirmTablesHaveSecondaryKeys(t *testing.T, tablets []*cluster.VttabletPro require.NotNil(t, res) row := res.Named().Row() tableSchema := row["Create Table"].ToString() - parsedDDL, err := sqlparser.ParseStrictDDL(tableSchema) + parsedDDL, err := sqlparser.NewTestParser().ParseStrictDDL(tableSchema) require.NoError(t, err) createTable, ok := parsedDDL.(*sqlparser.CreateTable) require.True(t, ok) diff --git a/go/test/endtoend/vreplication/vdiff2_test.go b/go/test/endtoend/vreplication/vdiff2_test.go index b3b235f978a..2465b1b2519 100644 --- a/go/test/endtoend/vreplication/vdiff2_test.go +++ b/go/test/endtoend/vreplication/vdiff2_test.go @@ -70,7 +70,7 @@ var testCases = []*testCase{ sourceShards: "0", targetShards: "-80,80-", tabletBaseID: 200, - tables: "customer,Lead,Lead-1", + tables: "customer,Lead,Lead-1,nopk", autoRetryError: true, retryInsert: `insert into customer(cid, name, typ) values(1991234, 'Testy McTester', 'soho')`, resume: true, @@ -117,7 +117,7 @@ func TestVDiff2(t *testing.T) { sourceShards := 
[]string{"0"} targetKs := "customer" targetShards := []string{"-80", "80-"} - // This forces us to use multiple vstream packets even with small test tables + // This forces us to use multiple vstream packets even with small test tables. extraVTTabletArgs = []string{"--vstream_packet_size=1"} vc = NewVitessCluster(t, "TestVDiff2", strings.Split(allCellNames, ","), mainClusterConfig) @@ -150,7 +150,11 @@ func TestVDiff2(t *testing.T) { query := `insert into customer(cid, name, typ, sport) values(1001, null, 'soho','')` execVtgateQuery(t, vtgateConn, fmt.Sprintf("%s:%s", sourceKs, sourceShards[0]), query) - generateMoreCustomers(t, sourceKs, 100000) + generateMoreCustomers(t, sourceKs, 1000) + + // Create rows in the nopk table using the customer names and random ages between 20 and 100. + _, err = vtgateConn.ExecuteFetch(fmt.Sprintf("insert into %s.nopk(name, age) select name, floor(rand()*80)+20 from %s.customer", sourceKs, sourceKs), -1, false) + require.NoError(t, err, "failed to insert rows into nopk table: %v", err) // The primary tablet is only added in the first cell. // We ONLY add primary tablets in this test. 
diff --git a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go index ae2b9324bb6..23ad27f6750 100644 --- a/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go +++ b/go/test/endtoend/vtgate/foreignkey/stress/fk_stress_test.go @@ -810,7 +810,7 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } if expectHint != "" { - stmt, err := sqlparser.Parse(alterStatement) + stmt, err := sqlparser.NewTestParser().Parse(alterStatement) require.NoError(t, err) ddlStmt, ok := stmt.(sqlparser.DDLStatement) require.True(t, ok) diff --git a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go index e7a55369f8c..8e234063e10 100644 --- a/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go +++ b/go/test/endtoend/vtgate/queries/aggregation/aggregation_test.go @@ -73,7 +73,10 @@ func TestAggregateTypes(t *testing.T) { mcmp.AssertMatches("select val1 as a, count(*) from aggr_test group by a order by a", `[[VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)] [VARCHAR("d") INT64(1)] [VARCHAR("e") INT64(2)]]`) mcmp.AssertMatches("select val1 as a, count(*) from aggr_test group by a order by 2, a", `[[VARCHAR("b") INT64(1)] [VARCHAR("d") INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("c") INT64(2)] [VARCHAR("e") INT64(2)]]`) mcmp.AssertMatches("select sum(val1) from aggr_test", `[[FLOAT64(0)]]`) - mcmp.AssertMatches("select avg(val1) from aggr_test", `[[FLOAT64(0)]]`) + t.Run("Average for sharded keyspaces", func(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + mcmp.AssertMatches("select avg(val1) from aggr_test", `[[FLOAT64(0)]]`) + }) } func TestGroupBy(t *testing.T) { @@ -174,11 +177,14 @@ func TestAggrOnJoin(t *testing.T) { mcmp.AssertMatches("select a.val1 from aggr_test a join t3 t on a.val2 = t.id7 group by a.val1 having count(*) = 4", 
`[[VARCHAR("a")]]`) - mcmp.AssertMatches(`select avg(a1.val2), avg(a2.val2) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7`, - "[[DECIMAL(1.5000) DECIMAL(1.0000)]]") + t.Run("Average in join for sharded", func(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + mcmp.AssertMatches(`select avg(a1.val2), avg(a2.val2) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7`, + "[[DECIMAL(1.5000) DECIMAL(1.0000)]]") - mcmp.AssertMatches(`select a1.val1, avg(a1.val2) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7 group by a1.val1`, - `[[VARCHAR("a") DECIMAL(1.0000)] [VARCHAR("b") DECIMAL(1.0000)] [VARCHAR("c") DECIMAL(3.0000)]]`) + mcmp.AssertMatches(`select a1.val1, avg(a1.val2) from aggr_test a1 join aggr_test a2 on a1.val2 = a2.id join t3 t on a2.val2 = t.id7 group by a1.val1`, + `[[VARCHAR("a") DECIMAL(1.0000)] [VARCHAR("b") DECIMAL(1.0000)] [VARCHAR("c") DECIMAL(3.0000)]]`) + }) } @@ -327,21 +333,27 @@ func TestAggOnTopOfLimit(t *testing.T) { mcmp.AssertMatches("select count(*) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2)]]") mcmp.AssertMatches("select count(val1) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1)]]") mcmp.AssertMatches("select count(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0)]]") - mcmp.AssertMatches("select avg(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[NULL]]") mcmp.AssertMatches("select val1, count(*) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(1)]]`) mcmp.AssertMatchesNoOrder("select val1, count(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL INT64(1)] [VARCHAR("a") INT64(2)] [VARCHAR("b") INT64(1)] [VARCHAR("c") INT64(2)]]`) - 
mcmp.AssertMatchesNoOrder("select val1, avg(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL DECIMAL(2.0000)] [VARCHAR("a") DECIMAL(3.5000)] [VARCHAR("b") DECIMAL(1.0000)] [VARCHAR("c") DECIMAL(3.5000)]]`) + t.Run("Average in sharded query", func(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + mcmp.AssertMatches("select avg(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[NULL]]") + mcmp.AssertMatchesNoOrder("select val1, avg(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", `[[NULL DECIMAL(2.0000)] [VARCHAR("a") DECIMAL(3.5000)] [VARCHAR("b") DECIMAL(1.0000)] [VARCHAR("c") DECIMAL(3.5000)]]`) + }) // mysql returns FLOAT64(0), vitess returns DECIMAL(0) - mcmp.AssertMatches("select count(*), sum(val1), avg(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) FLOAT64(0) FLOAT64(0)]]") mcmp.AssertMatches("select count(val1), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) DECIMAL(7)]]") - mcmp.AssertMatches("select count(val1), sum(id), avg(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) DECIMAL(7) DECIMAL(3.5000)]]") mcmp.AssertMatches("select count(*), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(2) DECIMAL(14)]]") mcmp.AssertMatches("select count(val1), sum(id) from (select id, val1 from aggr_test where val2 is null limit 2) as x", "[[INT64(1) DECIMAL(14)]]") mcmp.AssertMatches("select count(val2), sum(val2) from (select id, val2 from aggr_test where val2 is null limit 2) as x", "[[INT64(0) NULL]]") mcmp.AssertMatches("select val1, count(*), sum(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 limit 2) as x group by val1", `[[NULL INT64(1) DECIMAL(7)] [VARCHAR("a") INT64(1) DECIMAL(2)]]`) - mcmp.AssertMatchesNoOrder("select 
val1, count(val2), sum(val2), avg(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", - `[[NULL INT64(1) DECIMAL(2) DECIMAL(2.0000)] [VARCHAR("a") INT64(2) DECIMAL(7) DECIMAL(3.5000)] [VARCHAR("b") INT64(1) DECIMAL(1) DECIMAL(1.0000)] [VARCHAR("c") INT64(2) DECIMAL(7) DECIMAL(3.5000)]]`) + t.Run("Average in sharded query", func(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + mcmp.AssertMatches("select count(*), sum(val1), avg(val1) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) FLOAT64(0) FLOAT64(0)]]") + mcmp.AssertMatches("select count(val1), sum(id), avg(id) from (select id, val1 from aggr_test where val2 < 4 order by val1 desc limit 2) as x", "[[INT64(2) DECIMAL(7) DECIMAL(3.5000)]]") + mcmp.AssertMatchesNoOrder("select val1, count(val2), sum(val2), avg(val2) from (select val1, val2 from aggr_test limit 8) as x group by val1", + `[[NULL INT64(1) DECIMAL(2) DECIMAL(2.0000)] [VARCHAR("a") INT64(2) DECIMAL(7) DECIMAL(3.5000)] [VARCHAR("b") INT64(1) DECIMAL(1) DECIMAL(1.0000)] [VARCHAR("c") INT64(2) DECIMAL(7) DECIMAL(3.5000)]]`) + }) }) } } @@ -355,10 +367,13 @@ func TestEmptyTableAggr(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", workload)) mcmp.AssertMatches(" select count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") mcmp.AssertMatches(" select count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select count(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select avg(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[NULL]]") mcmp.AssertMatches(" select t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 
on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + t.Run("Average in sharded query", func(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + mcmp.AssertMatches(" select count(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") + mcmp.AssertMatches(" select avg(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[NULL]]") + }) }) } @@ -369,10 +384,13 @@ func TestEmptyTableAggr(t *testing.T) { utils.Exec(t, mcmp.VtConn, fmt.Sprintf("set workload = %s", workload)) mcmp.AssertMatches(" select count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") mcmp.AssertMatches(" select count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select count(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") - mcmp.AssertMatches(" select avg(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[NULL]]") mcmp.AssertMatches(" select t1.`name`, count(*) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") - mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + t.Run("Average in sharded query", func(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + mcmp.AssertMatches(" select count(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[INT64(0)]]") + mcmp.AssertMatches(" select avg(t1.value) from t2 inner join t1 on (t1.t1_id = t2.id) where t1.value = 'foo'", "[[NULL]]") + mcmp.AssertMatches(" select t1.`name`, count(*) from t1 inner join t2 on (t1.t1_id = t2.id) where t1.value = 'foo' group by t1.`name`", "[]") + }) }) } @@ -414,35 +432,38 @@ func TestAggregateLeftJoin(t *testing.T) { mcmp.AssertMatches("SELECT count(*) FROM t1 LEFT 
JOIN t2 ON t1.t1_id = t2.id", `[[INT64(2)]]`) mcmp.AssertMatches("SELECT sum(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1)]]`) mcmp.AssertMatches("SELECT sum(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1)]]`) - mcmp.AssertMatches("SELECT avg(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(0.5000)]]`) - mcmp.AssertMatches("SELECT avg(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1.0000)]]`) mcmp.AssertMatches("SELECT count(*) FROM t2 LEFT JOIN t1 ON t1.t1_id = t2.id WHERE IFNULL(t1.name, 'NOTSET') = 'r'", `[[INT64(1)]]`) - aggregations := []string{ - "count(t1.shardkey)", - "count(t2.shardkey)", - "sum(t1.shardkey)", - "sum(t2.shardkey)", - "avg(t1.shardkey)", - "avg(t2.shardkey)", - "count(*)", - } + t.Run("Average in sharded query", func(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + mcmp.AssertMatches("SELECT avg(t1.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(0.5000)]]`) + mcmp.AssertMatches("SELECT avg(t2.shardkey) FROM t1 LEFT JOIN t2 ON t1.t1_id = t2.id", `[[DECIMAL(1.0000)]]`) + aggregations := []string{ + "count(t1.shardkey)", + "count(t2.shardkey)", + "sum(t1.shardkey)", + "sum(t2.shardkey)", + "avg(t1.shardkey)", + "avg(t2.shardkey)", + "count(*)", + } - grouping := []string{ - "t1.t1_id", - "t1.shardKey", - "t1.value", - "t2.id", - "t2.shardKey", - } + grouping := []string{ + "t1.t1_id", + "t1.shardKey", + "t1.value", + "t2.id", + "t2.shardKey", + } - // quickly construct a big number of left join aggregation queries that have to be executed using the hash join - for _, agg := range aggregations { - for _, gb := range grouping { - query := fmt.Sprintf("SELECT %s FROM t1 LEFT JOIN (select id, shardkey from t2 limit 100) as t2 ON t1.t1_id = t2.id group by %s", agg, gb) - mcmp.Exec(query) + // quickly construct a big number of left join aggregation queries that have to be executed using the hash join + for _, agg := range 
aggregations { + for _, gb := range grouping { + query := fmt.Sprintf("SELECT %s FROM t1 LEFT JOIN (select id, shardkey from t2 limit 100) as t2 ON t1.t1_id = t2.id group by %s", agg, gb) + mcmp.Exec(query) + } } - } + }) } // TestScalarAggregate tests validates that only count is returned and no additional field is returned.gst @@ -470,7 +491,10 @@ func TestScalarAggregate(t *testing.T) { mcmp.Exec("insert into aggr_test(id, val1, val2) values(1,'a',1), (2,'A',1), (3,'b',1), (4,'c',3), (5,'c',4)") mcmp.AssertMatches("select count(distinct val1) from aggr_test", `[[INT64(3)]]`) - mcmp.AssertMatches("select avg(val1) from aggr_test", `[[FLOAT64(0)]]`) + t.Run("Average in sharded query", func(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + mcmp.AssertMatches("select avg(val1) from aggr_test", `[[FLOAT64(0)]]`) + }) } func TestAggregationRandomOnAnAggregatedValue(t *testing.T) { @@ -523,11 +547,14 @@ func TestComplexAggregation(t *testing.T) { mcmp.Exec(`SELECT 1+COUNT(t1_id) FROM t1`) mcmp.Exec(`SELECT COUNT(t1_id)+1 FROM t1`) mcmp.Exec(`SELECT COUNT(t1_id)+MAX(shardkey) FROM t1`) - mcmp.Exec(`SELECT COUNT(t1_id)+MAX(shardkey)+AVG(t1_id) FROM t1`) mcmp.Exec(`SELECT shardkey, MIN(t1_id)+MAX(t1_id) FROM t1 GROUP BY shardkey`) mcmp.Exec(`SELECT shardkey + MIN(t1_id)+MAX(t1_id) FROM t1 GROUP BY shardkey`) mcmp.Exec(`SELECT name+COUNT(t1_id)+1 FROM t1 GROUP BY name`) mcmp.Exec(`SELECT COUNT(*)+shardkey+MIN(t1_id)+1+MAX(t1_id)*SUM(t1_id)+1+name FROM t1 GROUP BY shardkey, name`) + t.Run("Average in sharded query", func(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") + mcmp.Exec(`SELECT COUNT(t1_id)+MAX(shardkey)+AVG(t1_id) FROM t1`) + }) } // TestGroupConcatAggregation tests the group_concat function with vitess doing the aggregation. 
diff --git a/go/test/endtoend/vtgate/queries/derived/cte_test.go b/go/test/endtoend/vtgate/queries/derived/cte_test.go index 677a5dba653..61ddf5d6661 100644 --- a/go/test/endtoend/vtgate/queries/derived/cte_test.go +++ b/go/test/endtoend/vtgate/queries/derived/cte_test.go @@ -18,9 +18,12 @@ package misc import ( "testing" + + "vitess.io/vitess/go/test/endtoend/utils" ) func TestCTEWithOrderByLimit(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") mcmp, closer := start(t) defer closer() @@ -28,6 +31,7 @@ func TestCTEWithOrderByLimit(t *testing.T) { } func TestCTEAggregationOnRHS(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") mcmp, closer := start(t) defer closer() @@ -36,6 +40,7 @@ func TestCTEAggregationOnRHS(t *testing.T) { } func TestCTERemoveInnerOrderBy(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") mcmp, closer := start(t) defer closer() @@ -43,6 +48,7 @@ func TestCTERemoveInnerOrderBy(t *testing.T) { } func TestCTEWithHaving(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") mcmp, closer := start(t) defer closer() @@ -53,6 +59,7 @@ func TestCTEWithHaving(t *testing.T) { } func TestCTEColumns(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") mcmp, closer := start(t) defer closer() diff --git a/go/test/endtoend/vtgate/queries/derived/derived_test.go b/go/test/endtoend/vtgate/queries/derived/derived_test.go index 293dddb355c..80ae36633e1 100644 --- a/go/test/endtoend/vtgate/queries/derived/derived_test.go +++ b/go/test/endtoend/vtgate/queries/derived/derived_test.go @@ -92,6 +92,7 @@ func TestDerivedTableColumns(t *testing.T) { // We do this by not using the apply join we usually use, and instead use the hash join engine primitive // These tests exercise these situations func TestDerivedTablesWithLimit(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") // We need full type info before planning this, so we wait for the schema tracker require.NoError(t, 
utils.WaitForAuthoritative(t, keyspaceName, "user", clusterInstance.VtgateProcess.ReadVSchema)) diff --git a/go/test/endtoend/vtgate/queries/dml/insert_test.go b/go/test/endtoend/vtgate/queries/dml/insert_test.go index aa34761ee2b..80d0602b898 100644 --- a/go/test/endtoend/vtgate/queries/dml/insert_test.go +++ b/go/test/endtoend/vtgate/queries/dml/insert_test.go @@ -21,7 +21,9 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" ) @@ -69,10 +71,19 @@ func TestFailureInsertSelect(t *testing.T) { // primary key same mcmp.AssertContainsError("insert into s_tbl(id, num) select id, num*20 from s_tbl where id = 1", `AlreadyExists desc = Duplicate entry '1' for key`) // lookup key same (does not fail on MySQL as there is no lookup, and we have not put unique constraint on num column) - utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num from s_tbl where id = 1", `(errno 1062) (sqlstate 23000)`) - // mismatch column count - mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count with the row`) - mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `column count does not match value count with the row`) + vtgateVersion, err := cluster.GetMajorVersion("vtgate") + require.NoError(t, err) + if vtgateVersion >= 19 { + utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num from s_tbl where id = 1", `(errno 1062) (sqlstate 23000)`) + // mismatch column count + mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count with the row`) + mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `column count does not match value count with the row`) + } else { + utils.AssertContainsError(t, mcmp.VtConn, "insert into s_tbl(id, num) select id*20, num 
from s_tbl where id = 1", `lookup.Create: Code: ALREADY_EXISTS`) + // mismatch column count + mcmp.AssertContainsError("insert into s_tbl(id, num) select 100,200,300", `column count does not match value count at row 1`) + mcmp.AssertContainsError("insert into s_tbl(id, num) select 100", `column count does not match value count at row 1`) + } }) } } diff --git a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go index 0be8a50b328..4ae5432bc81 100644 --- a/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go +++ b/go/test/endtoend/vtgate/queries/informationschema/informationschema_test.go @@ -223,6 +223,7 @@ func TestInfrSchemaAndUnionAll(t *testing.T) { } func TestTypeORMQuery(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") // This test checks that we can run queries similar to the ones that the TypeORM framework uses require.NoError(t, @@ -259,6 +260,7 @@ FROM (SELECT TABLE_SCHEMA, TABLE_NAME, COLUMN_NAME } func TestJoinWithSingleShardQueryOnRHS(t *testing.T) { + utils.SkipIfBinaryIsBelowVersion(t, 19, "vtgate") // This test checks that we can run queries like this, where the RHS is a single shard query mcmp, closer := start(t) defer closer() diff --git a/go/test/endtoend/vtgate/queries/misc/misc_test.go b/go/test/endtoend/vtgate/queries/misc/misc_test.go index 0fdee1b88a1..465b0adf883 100644 --- a/go/test/endtoend/vtgate/queries/misc/misc_test.go +++ b/go/test/endtoend/vtgate/queries/misc/misc_test.go @@ -59,8 +59,25 @@ func TestBitVals(t *testing.T) { mcmp.AssertMatches(`select b'1001', 0x9, B'010011011010'`, `[[VARBINARY("\t") VARBINARY("\t") VARBINARY("\x04\xda")]]`) mcmp.AssertMatches(`select b'1001', 0x9, B'010011011010' from t1`, `[[VARBINARY("\t") VARBINARY("\t") VARBINARY("\x04\xda")]]`) - mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010'`, `[[INT64(10) UINT64(11) INT64(1245)]]`, 
`[[INT64(10) UINT64(11) INT64(1245)]]`) - mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010' from t1`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`) + vtgateVersion, err := cluster.GetMajorVersion("vtgate") + require.NoError(t, err) + if vtgateVersion >= 19 { + mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010'`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`) + mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010' from t1`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[INT64(10) UINT64(11) INT64(1245)]]`) + } else { + mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010'`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[UINT64(10) UINT64(11) UINT64(1245)]]`) + mcmp.AssertMatchesNoCompare(`select 1 + b'1001', 2 + 0x9, 3 + B'010011011010' from t1`, `[[INT64(10) UINT64(11) INT64(1245)]]`, `[[UINT64(10) UINT64(11) UINT64(1245)]]`) + } +} + +// TestTimeFunctionWithPrecision tests that inserting data with NOW(1) works as intended. 
+func TestTimeFunctionWithPrecision(t *testing.T) { + mcmp, closer := start(t) + defer closer() + + mcmp.Exec("insert into t1(id1, id2) values (1, NOW(1))") + mcmp.Exec("insert into t1(id1, id2) values (2, NOW(2))") + mcmp.Exec("insert into t1(id1, id2) values (3, NOW())") } func TestHexVals(t *testing.T) { diff --git a/go/test/endtoend/vtgate/queries/normalize/normalize_test.go b/go/test/endtoend/vtgate/queries/normalize/normalize_test.go index b6495443a8e..735a26fc00c 100644 --- a/go/test/endtoend/vtgate/queries/normalize/normalize_test.go +++ b/go/test/endtoend/vtgate/queries/normalize/normalize_test.go @@ -28,6 +28,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/test/endtoend/utils" "vitess.io/vitess/go/mysql" @@ -40,6 +41,11 @@ func TestNormalizeAllFields(t *testing.T) { insertQuery := `insert into t1 values (1, "chars", "variable chars", x'73757265', 0x676F, 0.33, 9.99, 1, "1976-06-08", "small", "b", "{\"key\":\"value\"}", point(1,5), b'011', 0b0101)` normalizedInsertQuery := `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL */, :vtg7 /* DECIMAL */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* BITNUM */, :vtg16 /* BITNUM */)` + vtgateVersion, err := cluster.GetMajorVersion("vtgate") + require.NoError(t, err) + if vtgateVersion < 19 { + normalizedInsertQuery = `insert into t1 values (:vtg1 /* INT64 */, :vtg2 /* VARCHAR */, :vtg3 /* VARCHAR */, :vtg4 /* HEXVAL */, :vtg5 /* HEXNUM */, :vtg6 /* DECIMAL */, :vtg7 /* DECIMAL */, :vtg8 /* INT64 */, :vtg9 /* VARCHAR */, :vtg10 /* VARCHAR */, :vtg11 /* VARCHAR */, :vtg12 /* VARCHAR */, point(:vtg13 /* INT64 */, :vtg14 /* INT64 */), :vtg15 /* HEXNUM */, :vtg16 /* HEXNUM */)` + } selectQuery := "select * 
from t1" utils.Exec(t, conn, insertQuery) qr := utils.Exec(t, conn, selectQuery) diff --git a/go/test/endtoend/vtgate/queries/random/simplifier_test.go b/go/test/endtoend/vtgate/queries/random/simplifier_test.go index 2be9ef8ab93..13f4f891c7b 100644 --- a/go/test/endtoend/vtgate/queries/random/simplifier_test.go +++ b/go/test/endtoend/vtgate/queries/random/simplifier_test.go @@ -88,13 +88,13 @@ func simplifyResultsMismatchedQuery(t *testing.T, query string) string { formal, err := vindexes.LoadFormal("svschema.json") require.NoError(t, err) - vSchema := vindexes.BuildVSchema(formal) + vSchema := vindexes.BuildVSchema(formal, sqlparser.NewTestParser()) vSchemaWrapper := &vschemawrapper.VSchemaWrapper{ V: vSchema, Version: planbuilder.Gen4, } - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) simplified := simplifier.SimplifyStatement( diff --git a/go/test/fuzzing/ast_fuzzer.go b/go/test/fuzzing/ast_fuzzer.go index 118f044ea66..5951a0da9eb 100644 --- a/go/test/fuzzing/ast_fuzzer.go +++ b/go/test/fuzzing/ast_fuzzer.go @@ -36,11 +36,11 @@ func FuzzEqualsSQLNode(data []byte) int { if err != nil { return 0 } - inA, err := sqlparser.Parse(query1) + inA, err := sqlparser.NewTestParser().Parse(query1) if err != nil { return 0 } - inB, err := sqlparser.Parse(query2) + inB, err := sqlparser.NewTestParser().Parse(query2) if err != nil { return 0 } diff --git a/go/test/fuzzing/parser_fuzzer.go b/go/test/fuzzing/parser_fuzzer.go index 67b8a30ef00..04a37e6dbcb 100644 --- a/go/test/fuzzing/parser_fuzzer.go +++ b/go/test/fuzzing/parser_fuzzer.go @@ -42,7 +42,7 @@ func FuzzNormalizer(data []byte) int { } func FuzzParser(data []byte) int { - _, err := sqlparser.Parse(string(data)) + _, err := sqlparser.NewTestParser().Parse(string(data)) if err != nil { return 0 } @@ -55,7 +55,7 @@ func FuzzNodeFormat(data []byte) int { if err != nil { return 0 } - node, err := sqlparser.Parse(query) + node, err := 
sqlparser.NewTestParser().Parse(query) if err != nil { return 0 } @@ -69,6 +69,6 @@ func FuzzNodeFormat(data []byte) int { } func FuzzSplitStatementToPieces(data []byte) int { - _, _ = sqlparser.SplitStatementToPieces(string(data)) + _, _ = sqlparser.NewTestParser().SplitStatementToPieces(string(data)) return 1 } diff --git a/go/test/fuzzing/tabletserver_schema_fuzzer.go b/go/test/fuzzing/tabletserver_schema_fuzzer.go index 39af22a1918..655d0fb1606 100644 --- a/go/test/fuzzing/tabletserver_schema_fuzzer.go +++ b/go/test/fuzzing/tabletserver_schema_fuzzer.go @@ -19,8 +19,11 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -58,14 +61,14 @@ func FuzzLoadTable(data []byte) int { func newTestLoadTable(tableName, comment string, db *fakesqldb.DB) (*schema.Table, error) { ctx := context.Background() - appParams := db.ConnParams() - dbaParams := db.ConnParams() + appParams := dbconfigs.New(db.ConnParams()) + dbaParams := dbconfigs.New(db.ConnParams()) cfg := tabletenv.ConnPoolConfig{ Size: 2, IdleTimeout: 10 * time.Second, } - connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest"), "", cfg) + connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest", collations.MySQL8(), sqlparser.NewTestParser()), "", cfg) connPool.Open(appParams, dbaParams, appParams) conn, err := connPool.Get(ctx, nil) if err != nil { @@ -73,5 +76,5 @@ func newTestLoadTable(tableName, comment string, db *fakesqldb.DB) (*schema.Tabl } defer conn.Recycle() - return schema.LoadTable(conn, "fakesqldb", tableName, "BASE_TABLE", comment) + return schema.LoadTable(conn, "fakesqldb", tableName, "BASE_TABLE", comment, collations.MySQL8()) } diff --git 
a/go/test/fuzzing/vt_schema_fuzzer.go b/go/test/fuzzing/vt_schema_fuzzer.go index 2092eac866a..79a30d3394a 100644 --- a/go/test/fuzzing/vt_schema_fuzzer.go +++ b/go/test/fuzzing/vt_schema_fuzzer.go @@ -26,7 +26,7 @@ import ( // FuzzOnlineDDLFromCommentedStatement implements a fuzzer // that targets schema.OnlineDDLFromCommentedStatement func FuzzOnlineDDLFromCommentedStatement(data []byte) int { - stmt, err := sqlparser.Parse(string(data)) + stmt, err := sqlparser.NewTestParser().Parse(string(data)) if err != nil { return 0 } @@ -75,7 +75,7 @@ func FuzzNewOnlineDDLs(data []byte) int { return 0 } - onlineDDLs, err := schema.NewOnlineDDLs(keyspace, sql, ddlStmt, ddlStrategySetting, requestContext) + onlineDDLs, err := schema.NewOnlineDDLs(sql, ddlStmt, ddlStrategySetting, requestContext, keyspace) if err != nil { return 0 } diff --git a/go/test/fuzzing/vtctl_fuzzer.go b/go/test/fuzzing/vtctl_fuzzer.go index 82fdaa572de..ee9cf8a6b4b 100644 --- a/go/test/fuzzing/vtctl_fuzzer.go +++ b/go/test/fuzzing/vtctl_fuzzer.go @@ -20,7 +20,9 @@ import ( "context" "strings" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl" @@ -180,7 +182,7 @@ func Fuzz(data []byte) int { // Add params to the command commandSlice = append(commandSlice, args...) 
- _ = vtctl.RunCommand(ctx, wrangler.New(logger, topo, tmc), commandSlice) + _ = vtctl.RunCommand(ctx, wrangler.New(logger, topo, tmc, collations.MySQL8(), sqlparser.NewTestParser()), commandSlice) command++ } diff --git a/go/test/vschemawrapper/vschema_wrapper.go b/go/test/vschemawrapper/vschema_wrapper.go index 85d9840c3f7..21617dcdaee 100644 --- a/go/test/vschemawrapper/vschema_wrapper.go +++ b/go/test/vschemawrapper/vschema_wrapper.go @@ -82,7 +82,7 @@ func (vw *VSchemaWrapper) PlanPrepareStatement(ctx context.Context, query string if err != nil { return nil, nil, err } - stmt, _, err := sqlparser.Parse2(query) + stmt, _, err := vw.SQLParser().Parse2(query) if err != nil { return nil, nil, err } @@ -123,7 +123,15 @@ func (vw *VSchemaWrapper) GetSrvVschema() *vschemapb.SrvVSchema { } func (vw *VSchemaWrapper) ConnCollation() collations.ID { - return collations.CollationUtf8mb3ID + return collations.CollationUtf8mb4ID +} + +func (vw *VSchemaWrapper) CollationEnv() *collations.Environment { + return collations.MySQL8() +} + +func (vw *VSchemaWrapper) SQLParser() *sqlparser.Parser { + return sqlparser.NewTestParser() } func (vw *VSchemaWrapper) PlannerWarning(_ string) { diff --git a/go/tools/asthelpergen/asthelpergen.go b/go/tools/asthelpergen/asthelpergen.go index 1811ff72511..3f59fdb3ece 100644 --- a/go/tools/asthelpergen/asthelpergen.go +++ b/go/tools/asthelpergen/asthelpergen.go @@ -29,7 +29,6 @@ import ( "golang.org/x/tools/go/packages" "vitess.io/vitess/go/textutil" - "vitess.io/vitess/go/tools/codegen" ) diff --git a/go/vt/binlog/binlog_streamer_rbr_test.go b/go/vt/binlog/binlog_streamer_rbr_test.go index d8481ca0665..1678b086719 100644 --- a/go/vt/binlog/binlog_streamer_rbr_test.go +++ b/go/vt/binlog/binlog_streamer_rbr_test.go @@ -53,7 +53,7 @@ func TestStreamerParseRBREvents(t *testing.T) { }, { Name: "message", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }}, }) 
@@ -302,7 +302,7 @@ func TestStreamerParseRBRNameEscapes(t *testing.T) { }, { Name: "delete", Type: querypb.Type_VARCHAR, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }}, }) diff --git a/go/vt/binlog/binlogplayer/binlog_player.go b/go/vt/binlog/binlogplayer/binlog_player.go index 5b9d2e40e1e..f651f3bb25c 100644 --- a/go/vt/binlog/binlogplayer/binlog_player.go +++ b/go/vt/binlog/binlogplayer/binlog_player.go @@ -660,13 +660,6 @@ func GenerateUpdateTimeThrottled(uid int32, timeThrottledUnix int64, componentTh return fmt.Sprintf("update _vt.vreplication set time_updated=%v, time_throttled=%v, component_throttled='%v' where id=%v", timeThrottledUnix, timeThrottledUnix, componentThrottled, uid), nil } -// StartVReplication returns a statement to start the replication. -func StartVReplication(uid int32) string { - return fmt.Sprintf( - "update _vt.vreplication set state='%v', stop_pos=NULL where id=%v", - binlogdatapb.VReplicationWorkflowState_Running.String(), uid) -} - // StartVReplicationUntil returns a statement to start the replication with a stop position. 
func StartVReplicationUntil(uid int32, pos string) string { return fmt.Sprintf( diff --git a/go/vt/binlog/binlogplayer/dbclient.go b/go/vt/binlog/binlogplayer/dbclient.go index ce2ccaccb17..bc96e690b76 100644 --- a/go/vt/binlog/binlogplayer/dbclient.go +++ b/go/vt/binlog/binlogplayer/dbclient.go @@ -46,6 +46,7 @@ type DBClient interface { type dbClientImpl struct { dbConfig dbconfigs.Connector dbConn *mysql.Conn + parser *sqlparser.Parser } // dbClientImplWithSidecarDBReplacement is a DBClient implementation @@ -57,14 +58,15 @@ type dbClientImplWithSidecarDBReplacement struct { } // NewDBClient creates a DBClient instance -func NewDBClient(params dbconfigs.Connector) DBClient { +func NewDBClient(params dbconfigs.Connector, parser *sqlparser.Parser) DBClient { if sidecar.GetName() != sidecar.DefaultName { return &dbClientImplWithSidecarDBReplacement{ - dbClientImpl{dbConfig: params}, + dbClientImpl{dbConfig: params, parser: parser}, } } return &dbClientImpl{ dbConfig: params, + parser: parser, } } @@ -163,7 +165,7 @@ func (dc *dbClientImpl) ExecuteFetchMulti(query string, maxrows int) ([]*sqltype func (dcr *dbClientImplWithSidecarDBReplacement) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) { // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) + uq, err := dcr.parser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -172,19 +174,17 @@ func (dcr *dbClientImplWithSidecarDBReplacement) ExecuteFetch(query string, maxr func (dcr *dbClientImplWithSidecarDBReplacement) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { // Replace any provided sidecar database qualifiers with the correct one. 
- qps, err := sqlparser.SplitStatementToPieces(query) + qps, err := dcr.parser.SplitStatementToPieces(query) if err != nil { return nil, err } for i, qp := range qps { - uq, err := sqlparser.ReplaceTableQualifiers(qp, sidecar.DefaultName, sidecar.GetName()) + uq, err := dcr.parser.ReplaceTableQualifiers(qp, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } qps[i] = uq } - if err != nil { - return nil, err - } + return dcr.dbClientImpl.ExecuteFetchMulti(strings.Join(qps, ";"), maxrows) } diff --git a/go/vt/binlog/binlogplayer/mock_dbclient.go b/go/vt/binlog/binlogplayer/mock_dbclient.go index ce07fbe9179..abc170ed493 100644 --- a/go/vt/binlog/binlogplayer/mock_dbclient.go +++ b/go/vt/binlog/binlogplayer/mock_dbclient.go @@ -42,6 +42,7 @@ type MockDBClient struct { done chan struct{} invariants map[string]*sqltypes.Result Tag string + parser *sqlparser.Parser } type mockExpect struct { @@ -84,15 +85,17 @@ func NewMockDBClient(t *testing.T) *MockDBClient { "set @@session.sql_mode": {}, "set sql_mode": {}, }, + parser: sqlparser.NewTestParser(), } } // NewMockDbaClient returns a new DBClientMock with the default "Dba" UName. 
func NewMockDbaClient(t *testing.T) *MockDBClient { return &MockDBClient{ - t: t, - UName: mockClientUNameDba, - done: make(chan struct{}), + t: t, + UName: mockClientUNameDba, + done: make(chan struct{}), + parser: sqlparser.NewTestParser(), } } @@ -227,7 +230,7 @@ func (dc *MockDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Re } func (dc *MockDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { - queries, err := sqlparser.SplitStatementToPieces(query) + queries, err := dc.parser.SplitStatementToPieces(query) if err != nil { return nil, err } diff --git a/go/vt/binlog/keyspace_id_resolver.go b/go/vt/binlog/keyspace_id_resolver.go index 6903ba53b71..1ca198760a3 100644 --- a/go/vt/binlog/keyspace_id_resolver.go +++ b/go/vt/binlog/keyspace_id_resolver.go @@ -17,13 +17,13 @@ limitations under the License. package binlog import ( + "context" "fmt" "strings" - "context" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -47,13 +47,13 @@ type keyspaceIDResolverFactory func(*schema.Table) (int, keyspaceIDResolver, err // newKeyspaceIDResolverFactory creates a new // keyspaceIDResolverFactory for the provided keyspace and cell. -func newKeyspaceIDResolverFactory(ctx context.Context, ts *topo.Server, keyspace string, cell string) (keyspaceIDResolverFactory, error) { - return newKeyspaceIDResolverFactoryV3(ctx, ts, keyspace, cell) +func newKeyspaceIDResolverFactory(ctx context.Context, ts *topo.Server, keyspace string, cell string, parser *sqlparser.Parser) (keyspaceIDResolverFactory, error) { + return newKeyspaceIDResolverFactoryV3(ctx, ts, keyspace, cell, parser) } // newKeyspaceIDResolverFactoryV3 finds the SrvVSchema in the cell, // gets the keyspace part, and uses it to find the column name. 
-func newKeyspaceIDResolverFactoryV3(ctx context.Context, ts *topo.Server, keyspace string, cell string) (keyspaceIDResolverFactory, error) { +func newKeyspaceIDResolverFactoryV3(ctx context.Context, ts *topo.Server, keyspace string, cell string, parser *sqlparser.Parser) (keyspaceIDResolverFactory, error) { srvVSchema, err := ts.GetSrvVSchema(ctx, cell) if err != nil { return nil, err @@ -62,7 +62,7 @@ func newKeyspaceIDResolverFactoryV3(ctx context.Context, ts *topo.Server, keyspa if !ok { return nil, fmt.Errorf("SrvVSchema has no entry for keyspace %v", keyspace) } - keyspaceSchema, err := vindexes.BuildKeyspaceSchema(kschema, keyspace) + keyspaceSchema, err := vindexes.BuildKeyspaceSchema(kschema, keyspace, parser) if err != nil { return nil, fmt.Errorf("cannot build vschema for keyspace %v: %v", keyspace, err) } diff --git a/go/vt/binlog/updatestreamctl.go b/go/vt/binlog/updatestreamctl.go index 78d61c0860c..4397eccd4da 100644 --- a/go/vt/binlog/updatestreamctl.go +++ b/go/vt/binlog/updatestreamctl.go @@ -27,6 +27,7 @@ import ( "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -70,47 +71,6 @@ type UpdateStreamControl interface { IsEnabled() bool } -// UpdateStreamControlMock is an implementation of UpdateStreamControl -// to be used in tests -type UpdateStreamControlMock struct { - enabled bool - sync.Mutex -} - -// NewUpdateStreamControlMock creates a new UpdateStreamControlMock -func NewUpdateStreamControlMock() *UpdateStreamControlMock { - return &UpdateStreamControlMock{} -} - -// InitDBConfig is part of UpdateStreamControl -func (m *UpdateStreamControlMock) InitDBConfig(*dbconfigs.DBConfigs) { -} - -// RegisterService is part of UpdateStreamControl -func (m *UpdateStreamControlMock) RegisterService() { -} - -// Enable is part of UpdateStreamControl -func (m *UpdateStreamControlMock) Enable() { - 
m.Lock() - m.enabled = true - m.Unlock() -} - -// Disable is part of UpdateStreamControl -func (m *UpdateStreamControlMock) Disable() { - m.Lock() - m.enabled = false - m.Unlock() -} - -// IsEnabled is part of UpdateStreamControl -func (m *UpdateStreamControlMock) IsEnabled() bool { - m.Lock() - defer m.Unlock() - return m.enabled -} - // UpdateStreamImpl is the real implementation of UpdateStream // and UpdateStreamControl type UpdateStreamImpl struct { @@ -126,6 +86,7 @@ type UpdateStreamImpl struct { state atomic.Int64 stateWaitGroup sync.WaitGroup streams StreamList + parser *sqlparser.Parser } // StreamList is a map of context.CancelFunc to mass-interrupt ongoing @@ -179,12 +140,13 @@ type RegisterUpdateStreamServiceFunc func(UpdateStream) var RegisterUpdateStreamServices []RegisterUpdateStreamServiceFunc // NewUpdateStream returns a new UpdateStreamImpl object -func NewUpdateStream(ts *topo.Server, keyspace string, cell string, se *schema.Engine) *UpdateStreamImpl { +func NewUpdateStream(ts *topo.Server, keyspace string, cell string, se *schema.Engine, parser *sqlparser.Parser) *UpdateStreamImpl { return &UpdateStreamImpl{ ts: ts, keyspace: keyspace, cell: cell, se: se, + parser: parser, } } @@ -275,7 +237,7 @@ func (updateStream *UpdateStreamImpl) StreamKeyRange(ctx context.Context, positi return callback(trans) }) bls := NewStreamer(updateStream.cp, updateStream.se, charset, pos, 0, f) - bls.resolverFactory, err = newKeyspaceIDResolverFactory(ctx, updateStream.ts, updateStream.keyspace, updateStream.cell) + bls.resolverFactory, err = newKeyspaceIDResolverFactory(ctx, updateStream.ts, updateStream.keyspace, updateStream.cell, updateStream.parser) if err != nil { return fmt.Errorf("newKeyspaceIDResolverFactory failed: %v", err) } diff --git a/go/vt/dbconfigs/dbconfigs.go b/go/vt/dbconfigs/dbconfigs.go index fe3a228835c..82c322e7ae9 100644 --- a/go/vt/dbconfigs/dbconfigs.go +++ b/go/vt/dbconfigs/dbconfigs.go @@ -26,13 +26,13 @@ import ( 
"github.com/spf13/pflag" - "vitess.io/vitess/go/vt/servenv" - "vitess.io/vitess/go/vt/vttls" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/log" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttls" "vitess.io/vitess/go/yaml2" ) @@ -123,7 +123,7 @@ func RegisterFlags(userKeys ...string) { servenv.OnParse(func(fs *pflag.FlagSet) { registerBaseFlags(fs) for _, userKey := range userKeys { - uc, cp := GlobalDBConfigs.getParams(userKey, &GlobalDBConfigs) + uc, cp := GlobalDBConfigs.getParams(userKey) registerPerUserFlags(fs, userKey, uc, cp) } }) @@ -318,9 +318,9 @@ func (dbcfgs *DBConfigs) Clone() *DBConfigs { // parameters. This is only for legacy support. // If no per-user parameters are supplied, then the defaultSocketFile // is used to initialize the per-user conn params. -func (dbcfgs *DBConfigs) InitWithSocket(defaultSocketFile string) { +func (dbcfgs *DBConfigs) InitWithSocket(defaultSocketFile string, collationEnv *collations.Environment) { for _, userKey := range All { - uc, cp := dbcfgs.getParams(userKey, dbcfgs) + uc, cp := dbcfgs.getParams(userKey) // TODO @rafael: For ExternalRepl we need to respect the provided host / port // At the moment this is an snowflake user connection type that it used by // vreplication to connect to external mysql hosts that are not part of a vitess @@ -338,8 +338,13 @@ func (dbcfgs *DBConfigs) InitWithSocket(defaultSocketFile string) { // If the connection params has a charset defined, it will not be overridden by the // global configuration. 
- if dbcfgs.Charset != "" && cp.Charset == "" { - cp.Charset = dbcfgs.Charset + if dbcfgs.Charset != "" && cp.Charset == collations.Unknown { + ch, err := collationEnv.ParseConnectionCharset(dbcfgs.Charset) + if err != nil { + log.Warningf("Error parsing charset %s: %v", dbcfgs.Charset, err) + ch = collationEnv.DefaultConnectionCharset() + } + cp.Charset = ch } if dbcfgs.Flags != 0 { @@ -367,7 +372,7 @@ func (dbcfgs *DBConfigs) InitWithSocket(defaultSocketFile string) { log.Infof("DBConfigs: %v\n", dbcfgs.String()) } -func (dbcfgs *DBConfigs) getParams(userKey string, dbc *DBConfigs) (*UserConfig, *mysql.ConnParams) { +func (dbcfgs *DBConfigs) getParams(userKey string) (*UserConfig, *mysql.ConnParams) { var uc *UserConfig var cp *mysql.ConnParams switch userKey { diff --git a/go/vt/dbconfigs/dbconfigs_test.go b/go/vt/dbconfigs/dbconfigs_test.go index a97f2526c17..029682d13b7 100644 --- a/go/vt/dbconfigs/dbconfigs_test.go +++ b/go/vt/dbconfigs/dbconfigs_test.go @@ -27,6 +27,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/yaml2" ) @@ -36,10 +37,10 @@ func TestInit(t *testing.T) { dbaParams: mysql.ConnParams{Host: "host"}, Charset: "utf8", } - dbConfigs.InitWithSocket("default") - assert.Equal(t, mysql.ConnParams{UnixSocket: "socket", Charset: "utf8"}, dbConfigs.appParams) - assert.Equal(t, mysql.ConnParams{Host: "host", Charset: "utf8"}, dbConfigs.dbaParams) - assert.Equal(t, mysql.ConnParams{UnixSocket: "default", Charset: "utf8"}, dbConfigs.appdebugParams) + dbConfigs.InitWithSocket("default", collations.MySQL8()) + assert.Equal(t, mysql.ConnParams{UnixSocket: "socket", Charset: collations.CollationUtf8mb3ID}, dbConfigs.appParams) + assert.Equal(t, mysql.ConnParams{Host: "host", Charset: collations.CollationUtf8mb3ID}, dbConfigs.dbaParams) + assert.Equal(t, mysql.ConnParams{UnixSocket: "default", Charset: collations.CollationUtf8mb3ID}, dbConfigs.appdebugParams) dbConfigs 
= DBConfigs{ Host: "a", @@ -72,7 +73,7 @@ func TestInit(t *testing.T) { Host: "host", }, } - dbConfigs.InitWithSocket("default") + dbConfigs.InitWithSocket("default", collations.MySQL8()) want := mysql.ConnParams{ Host: "a", @@ -80,7 +81,7 @@ func TestInit(t *testing.T) { Uname: "app", Pass: "apppass", UnixSocket: "b", - Charset: "utf8mb4", + Charset: collations.CollationUtf8mb4ID, Flags: 2, Flavor: "flavor", ConnectTimeoutMs: 250, @@ -91,7 +92,7 @@ func TestInit(t *testing.T) { Host: "a", Port: 1, UnixSocket: "b", - Charset: "utf8mb4", + Charset: collations.CollationUtf8mb4ID, Flags: 2, Flavor: "flavor", SslCa: "d", @@ -107,7 +108,7 @@ func TestInit(t *testing.T) { Uname: "dba", Pass: "dbapass", UnixSocket: "b", - Charset: "utf8mb4", + Charset: collations.CollationUtf8mb4ID, Flags: 2, Flavor: "flavor", SslCa: "d", @@ -143,21 +144,21 @@ func TestInit(t *testing.T) { }, appParams: mysql.ConnParams{ UnixSocket: "socket", - Charset: "utf8mb4", + Charset: collations.CollationUtf8mb4ID, }, dbaParams: mysql.ConnParams{ Host: "host", Flags: 2, }, } - dbConfigs.InitWithSocket("default") + dbConfigs.InitWithSocket("default", collations.MySQL8()) want = mysql.ConnParams{ Host: "a", Port: 1, Uname: "app", Pass: "apppass", UnixSocket: "b", - Charset: "utf8mb4", + Charset: collations.CollationUtf8mb4ID, } assert.Equal(t, want, dbConfigs.appParams) want = mysql.ConnParams{ @@ -168,7 +169,7 @@ func TestInit(t *testing.T) { SslCaPath: "e", SslCert: "f", SslKey: "g", - Charset: "utf8", + Charset: collations.CollationUtf8mb3ID, } assert.Equal(t, want, dbConfigs.appdebugParams) want = mysql.ConnParams{ @@ -182,7 +183,7 @@ func TestInit(t *testing.T) { SslCaPath: "e", SslCert: "f", SslKey: "g", - Charset: "utf8", + Charset: collations.CollationUtf8mb3ID, } assert.Equal(t, want, dbConfigs.dbaParams) } @@ -201,13 +202,13 @@ func TestUseTCP(t *testing.T) { }, Charset: "utf8", } - dbConfigs.InitWithSocket("default") + dbConfigs.InitWithSocket("default", collations.MySQL8()) want := 
mysql.ConnParams{ Host: "a", Port: 1, Uname: "app", - Charset: "utf8", + Charset: collations.CollationUtf8mb3ID, } assert.Equal(t, want, dbConfigs.appParams) @@ -216,7 +217,7 @@ func TestUseTCP(t *testing.T) { Port: 1, Uname: "dba", UnixSocket: "b", - Charset: "utf8", + Charset: collations.CollationUtf8mb3ID, } assert.Equal(t, want, dbConfigs.dbaParams) } diff --git a/go/vt/key/destination.go b/go/vt/key/destination.go index be95406cca7..6b8f145390b 100644 --- a/go/vt/key/destination.go +++ b/go/vt/key/destination.go @@ -154,40 +154,6 @@ func processExactKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "keyrange %v does not exactly match shards", KeyRangeString(kr)) } -// -// DestinationExactKeyRanges -// - -// DestinationExactKeyRanges is the destination for multiple KeyRanges. -// The KeyRanges must map exactly to one or more shards, and cannot -// start or end in the middle of a shard. -// It implements the Destination interface. -type DestinationExactKeyRanges []*topodatapb.KeyRange - -// Resolve is part of the Destination interface. -func (d DestinationExactKeyRanges) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { - for _, kr := range d { - if err := processExactKeyRange(allShards, kr, addShard); err != nil { - return err - } - } - return nil -} - -// String is part of the Destination interface. -func (d DestinationExactKeyRanges) String() string { - var buffer strings.Builder - buffer.WriteString("DestinationExactKeyRanges(") - for i, kr := range d { - if i > 0 { - buffer.WriteByte(',') - } - buffer.WriteString(KeyRangeString(kr)) - } - buffer.WriteByte(')') - return buffer.String() -} - // // DestinationKeyRange // @@ -225,38 +191,6 @@ func processKeyRange(allShards []*topodatapb.ShardReference, kr *topodatapb.KeyR return nil } -// -// DestinationKeyRanges -// - -// DestinationKeyRanges is the destination for multiple KeyRanges. 
-// It implements the Destination interface. -type DestinationKeyRanges []*topodatapb.KeyRange - -// Resolve is part of the Destination interface. -func (d DestinationKeyRanges) Resolve(allShards []*topodatapb.ShardReference, addShard func(shard string) error) error { - for _, kr := range d { - if err := processKeyRange(allShards, kr, addShard); err != nil { - return err - } - } - return nil -} - -// String is part of the Destination interface. -func (d DestinationKeyRanges) String() string { - var buffer strings.Builder - buffer.WriteString("DestinationKeyRanges(") - for i, kr := range d { - if i > 0 { - buffer.WriteByte(',') - } - buffer.WriteString(KeyRangeString(kr)) - } - buffer.WriteByte(')') - return buffer.String() -} - // // DestinationKeyspaceID // diff --git a/go/vt/mysqlctl/cmd.go b/go/vt/mysqlctl/cmd.go index 222a39e26ee..cd4fd42f181 100644 --- a/go/vt/mysqlctl/cmd.go +++ b/go/vt/mysqlctl/cmd.go @@ -23,12 +23,13 @@ package mysqlctl import ( "fmt" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" ) // CreateMysqldAndMycnf returns a Mysqld and a Mycnf object to use for working with a MySQL // installation that hasn't been set up yet. -func CreateMysqldAndMycnf(tabletUID uint32, mysqlSocket string, mysqlPort int) (*Mysqld, *Mycnf, error) { +func CreateMysqldAndMycnf(tabletUID uint32, mysqlSocket string, mysqlPort int, collationEnv *collations.Environment) (*Mysqld, *Mycnf, error) { mycnf := NewMycnf(tabletUID, mysqlPort) // Choose a random MySQL server-id, since this is a fresh data dir. 
// We don't want to use the tablet UID as the MySQL server-id, @@ -46,20 +47,20 @@ func CreateMysqldAndMycnf(tabletUID uint32, mysqlSocket string, mysqlPort int) ( mycnf.SocketFile = mysqlSocket } - dbconfigs.GlobalDBConfigs.InitWithSocket(mycnf.SocketFile) + dbconfigs.GlobalDBConfigs.InitWithSocket(mycnf.SocketFile, collationEnv) return NewMysqld(&dbconfigs.GlobalDBConfigs), mycnf, nil } // OpenMysqldAndMycnf returns a Mysqld and a Mycnf object to use for working with a MySQL // installation that already exists. The Mycnf will be built based on the my.cnf file // of the MySQL instance. -func OpenMysqldAndMycnf(tabletUID uint32) (*Mysqld, *Mycnf, error) { +func OpenMysqldAndMycnf(tabletUID uint32, collationEnv *collations.Environment) (*Mysqld, *Mycnf, error) { // We pass a port of 0, this will be read and overwritten from the path on disk mycnf, err := ReadMycnf(NewMycnf(tabletUID, 0), 0) if err != nil { return nil, nil, fmt.Errorf("couldn't read my.cnf file: %v", err) } - dbconfigs.GlobalDBConfigs.InitWithSocket(mycnf.SocketFile) + dbconfigs.GlobalDBConfigs.InitWithSocket(mycnf.SocketFile, collationEnv) return NewMysqld(&dbconfigs.GlobalDBConfigs), mycnf, nil } diff --git a/go/vt/mysqlctl/fakemysqldaemon.go b/go/vt/mysqlctl/fakemysqldaemon.go index 521a839bf78..33a553a25e9 100644 --- a/go/vt/mysqlctl/fakemysqldaemon.go +++ b/go/vt/mysqlctl/fakemysqldaemon.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/dbconnpool" "vitess.io/vitess/go/vt/mysqlctl/tmutils" @@ -196,7 +197,7 @@ func NewFakeMysqlDaemon(db *fakesqldb.DB) *FakeMysqlDaemon { } if db != nil { result.appPool = dbconnpool.NewConnectionPool("AppConnPool", nil, 5, time.Minute, 0, 0) - result.appPool.Open(db.ConnParams()) + result.appPool.Open(dbconfigs.New(db.ConnParams())) } return result } @@ -653,12 +654,12 @@ func (fmd *FakeMysqlDaemon) 
GetAppConnection(ctx context.Context) (*dbconnpool.P // GetDbaConnection is part of the MysqlDaemon interface. func (fmd *FakeMysqlDaemon) GetDbaConnection(ctx context.Context) (*dbconnpool.DBConnection, error) { - return dbconnpool.NewDBConnection(ctx, fmd.db.ConnParams()) + return dbconnpool.NewDBConnection(ctx, dbconfigs.New(fmd.db.ConnParams())) } // GetAllPrivsConnection is part of the MysqlDaemon interface. func (fmd *FakeMysqlDaemon) GetAllPrivsConnection(ctx context.Context) (*dbconnpool.DBConnection, error) { - return dbconnpool.NewDBConnection(ctx, fmd.db.ConnParams()) + return dbconnpool.NewDBConnection(ctx, dbconfigs.New(fmd.db.ConnParams())) } // SetSemiSyncEnabled is part of the MysqlDaemon interface. diff --git a/go/vt/mysqlctl/mycnf.go b/go/vt/mysqlctl/mycnf.go index dad91e20fed..7ae2d5d0aa9 100644 --- a/go/vt/mysqlctl/mycnf.go +++ b/go/vt/mysqlctl/mycnf.go @@ -178,9 +178,7 @@ func ReadMycnf(mycnf *Mycnf, waitTime time.Duration) (*Mycnf, error) { defer f.Close() buf := bufio.NewReader(f) - if err != nil { - return nil, err - } + mycnf.mycnfMap = make(map[string]string) var lval, rval string var parts [][]byte diff --git a/go/vt/mysqlctl/mycnf_test.go b/go/vt/mysqlctl/mycnf_test.go index fc54f063618..7b8c2b1ddf0 100644 --- a/go/vt/mysqlctl/mycnf_test.go +++ b/go/vt/mysqlctl/mycnf_test.go @@ -26,6 +26,8 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/servenv" ) @@ -113,7 +115,7 @@ func NoTestMycnfHook(t *testing.T) { // this is not being passed, so it should be nil os.Setenv("MY_VAR", "myvalue") - dbconfigs.GlobalDBConfigs.InitWithSocket(cnf.SocketFile) + dbconfigs.GlobalDBConfigs.InitWithSocket(cnf.SocketFile, collations.MySQL8()) mysqld := NewMysqld(&dbconfigs.GlobalDBConfigs) servenv.OnClose(mysqld.Close) diff --git a/go/vt/mysqlctl/mysql_daemon.go b/go/vt/mysqlctl/mysql_daemon.go index 66454e8b8a8..9e8baebefd6 100644 --- 
a/go/vt/mysqlctl/mysql_daemon.go +++ b/go/vt/mysqlctl/mysql_daemon.go @@ -94,7 +94,6 @@ type MysqlDaemon interface { GetSchema(ctx context.Context, dbName string, request *tabletmanagerdatapb.GetSchemaRequest) (*tabletmanagerdatapb.SchemaDefinition, error) GetColumns(ctx context.Context, dbName, table string) ([]*querypb.Field, []string, error) GetPrimaryKeyColumns(ctx context.Context, dbName, table string) ([]string, error) - GetPrimaryKeyEquivalentColumns(ctx context.Context, dbName, table string) ([]string, string, error) PreflightSchemaChange(ctx context.Context, dbName string, changes []string) ([]*tabletmanagerdatapb.SchemaChangeResult, error) ApplySchemaChange(ctx context.Context, dbName string, change *tmutils.SchemaChange) (*tabletmanagerdatapb.SchemaChangeResult, error) diff --git a/go/vt/mysqlctl/replication.go b/go/vt/mysqlctl/replication.go index 1dd03d901cb..af20bbef85f 100644 --- a/go/vt/mysqlctl/replication.go +++ b/go/vt/mysqlctl/replication.go @@ -237,7 +237,8 @@ func (mysqld *Mysqld) IsSuperReadOnly() (bool, error) { if err != nil { return false, err } - if err == nil && len(qr.Rows) == 1 { + + if len(qr.Rows) == 1 { sro := qr.Rows[0][0].ToString() if sro == "1" || sro == "ON" { return true, nil diff --git a/go/vt/mysqlctl/schema.go b/go/vt/mysqlctl/schema.go index 6f1c7c19570..f3325827ab9 100644 --- a/go/vt/mysqlctl/schema.go +++ b/go/vt/mysqlctl/schema.go @@ -579,13 +579,7 @@ func (mysqld *Mysqld) ApplySchemaChange(ctx context.Context, dbName string, chan // defined PRIMARY KEY then it may return the columns for // that index if it is likely the most efficient one amongst // the available PKE indexes on the table. 
-func (mysqld *Mysqld) GetPrimaryKeyEquivalentColumns(ctx context.Context, dbName, table string) ([]string, string, error) { - conn, err := getPoolReconnect(ctx, mysqld.dbaPool) - if err != nil { - return nil, "", err - } - defer conn.Recycle() - +func GetPrimaryKeyEquivalentColumns(ctx context.Context, exec func(string, int, bool) (*sqltypes.Result, error), dbName, table string) ([]string, string, error) { // We use column name aliases to guarantee lower case for our named results. sql := ` SELECT index_cols.COLUMN_NAME AS column_name, index_cols.INDEX_NAME as index_name FROM information_schema.STATISTICS AS index_cols INNER JOIN @@ -629,7 +623,7 @@ func (mysqld *Mysqld) GetPrimaryKeyEquivalentColumns(ctx context.Context, dbName encodedDbName := encodeEntityName(dbName) encodedTable := encodeEntityName(table) sql = fmt.Sprintf(sql, encodedDbName, encodedTable, encodedDbName, encodedTable, encodedDbName, encodedTable) - qr, err := conn.Conn.ExecuteFetch(sql, 1000, true) + qr, err := exec(sql, 1000, true) if err != nil { return nil, "", err } diff --git a/go/vt/mysqlctl/tmutils/schema.go b/go/vt/mysqlctl/tmutils/schema.go index aae529f89b0..781b943a4b2 100644 --- a/go/vt/mysqlctl/tmutils/schema.go +++ b/go/vt/mysqlctl/tmutils/schema.go @@ -40,31 +40,6 @@ const ( TableView = "VIEW" ) -// TableDefinitionGetColumn returns the index of a column inside a -// TableDefinition. -func TableDefinitionGetColumn(td *tabletmanagerdatapb.TableDefinition, name string) (index int, ok bool) { - lowered := strings.ToLower(name) - for i, n := range td.Columns { - if lowered == strings.ToLower(n) { - return i, true - } - } - return -1, false -} - -// TableDefinitions is a list of TableDefinition, for sorting -type TableDefinitions []*tabletmanagerdatapb.TableDefinition - -// Len returns TableDefinitions length. -func (tds TableDefinitions) Len() int { - return len(tds) -} - -// Swap used for sorting TableDefinitions. 
-func (tds TableDefinitions) Swap(i, j int) { - tds[i], tds[j] = tds[j], tds[i] -} - // TableFilter is a filter for table names and types. type TableFilter struct { includeViews bool diff --git a/go/vt/mysqlctl/utils.go b/go/vt/mysqlctl/utils.go deleted file mode 100644 index cc34be6abfe..00000000000 --- a/go/vt/mysqlctl/utils.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package mysqlctl - -import ( - "vitess.io/vitess/go/vt/log" -) - -type MapFunc func(index int) error - -// ConcurrentMap applies fun in a concurrent manner on integers from 0 -// to n-1 (they are assumed to be indexes of some slice containing -// items to be processed). The first error returned by a fun -// application will returned (subsequent errors will only be -// logged). It will use concurrency goroutines. 
-func ConcurrentMap(concurrency, n int, fun MapFunc) error { - errors := make(chan error) - work := make(chan int, n) - - for i := 0; i < n; i++ { - work <- i - } - close(work) - - for j := 0; j < concurrency; j++ { - go func() { - for i := range work { - errors <- fun(i) - } - }() - } - var err error - - for i := 0; i < n; i++ { - if e := <-errors; e != nil { - if err != nil { - log.Errorf("multiple errors, this one happened but it won't be returned: %v", err) - } - err = e - } - } - return err -} diff --git a/go/vt/mysqlctl/utils_test.go b/go/vt/mysqlctl/utils_test.go deleted file mode 100644 index 0fdcae92bfa..00000000000 --- a/go/vt/mysqlctl/utils_test.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package mysqlctl - -import ( - "errors" - "testing" -) - -func TestConcurrentMap(t *testing.T) { - work := make([]int, 10) - result := make([]int, 10) - for i := 0; i < 10; i++ { - work[i] = i - } - mapFunc := func(i int) error { - result[i] = work[i] - return nil - } - if err := ConcurrentMap(2, 10, mapFunc); err != nil { - t.Errorf("Unexpected error: %v", err) - } - - for i := 0; i < 10; i++ { - if got, expected := result[i], work[i]; got != expected { - t.Errorf("Wrong values in result: got %v, expected %v", got, expected) - } - } - fooErr := errors.New("foo") - if err := ConcurrentMap(2, 10, func(i int) error { return fooErr }); err != fooErr { - t.Errorf("Didn't get expected error: %v", err) - } -} diff --git a/go/vt/schema/online_ddl.go b/go/vt/schema/online_ddl.go index a06866e996a..3b28a4b9e2e 100644 --- a/go/vt/schema/online_ddl.go +++ b/go/vt/schema/online_ddl.go @@ -108,17 +108,10 @@ type OnlineDDL struct { WasReadyToComplete int64 `json:"was_ready_to_complete,omitempty"` } -// FromJSON creates an OnlineDDL from json -func FromJSON(bytes []byte) (*OnlineDDL, error) { - onlineDDL := &OnlineDDL{} - err := json.Unmarshal(bytes, onlineDDL) - return onlineDDL, err -} - // ParseOnlineDDLStatement parses the given SQL into a statement and returns the action type of the DDL statement, or error // if the statement is not a DDL -func ParseOnlineDDLStatement(sql string) (ddlStmt sqlparser.DDLStatement, action sqlparser.DDLAction, err error) { - stmt, err := sqlparser.Parse(sql) +func ParseOnlineDDLStatement(sql string, parser *sqlparser.Parser) (ddlStmt sqlparser.DDLStatement, action sqlparser.DDLAction, err error) { + stmt, err := parser.Parse(sql) if err != nil { return nil, 0, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "error parsing statement: SQL=%s, error=%+v", sql, err) } @@ -129,10 +122,10 @@ func ParseOnlineDDLStatement(sql string) (ddlStmt sqlparser.DDLStatement, action return ddlStmt, action, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unsupported 
query type: %s", sql) } -func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting) error { +func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting, parser *sqlparser.Parser) error { // SQL statement sanity checks: if !ddlStmt.IsFullyParsed() { - if _, err := sqlparser.ParseStrictDDL(sql); err != nil { + if _, err := parser.ParseStrictDDL(sql); err != nil { // More information about the reason why the statement is not fully parsed: return vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.SyntaxError, "%v", err) } @@ -154,12 +147,12 @@ func onlineDDLStatementSanity(sql string, ddlStmt sqlparser.DDLStatement, ddlStr } // NewOnlineDDLs takes a single DDL statement, normalizes it (potentially break down into multiple statements), and generates one or more OnlineDDL instances, one for each normalized statement -func NewOnlineDDLs(keyspace string, sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string) (onlineDDLs [](*OnlineDDL), err error) { +func NewOnlineDDLs(keyspace string, sql string, ddlStmt sqlparser.DDLStatement, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string, parser *sqlparser.Parser) (onlineDDLs []*OnlineDDL, err error) { appendOnlineDDL := func(tableName string, ddlStmt sqlparser.DDLStatement) error { - if err := onlineDDLStatementSanity(sql, ddlStmt, ddlStrategySetting); err != nil { + if err := onlineDDLStatementSanity(sql, ddlStmt, ddlStrategySetting, parser); err != nil { return err } - onlineDDL, err := NewOnlineDDL(keyspace, tableName, sqlparser.String(ddlStmt), ddlStrategySetting, migrationContext, providedUUID) + onlineDDL, err := NewOnlineDDL(keyspace, tableName, sqlparser.String(ddlStmt), ddlStrategySetting, migrationContext, providedUUID, parser) if err != nil { return err } @@ -190,7 +183,7 @@ func 
NewOnlineDDLs(keyspace string, sql string, ddlStmt sqlparser.DDLStatement, } // NewOnlineDDL creates a schema change request with self generated UUID and RequestTime -func NewOnlineDDL(keyspace string, table string, sql string, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string) (onlineDDL *OnlineDDL, err error) { +func NewOnlineDDL(keyspace string, table string, sql string, ddlStrategySetting *DDLStrategySetting, migrationContext string, providedUUID string, parser *sqlparser.Parser) (onlineDDL *OnlineDDL, err error) { if ddlStrategySetting == nil { return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "NewOnlineDDL: found nil DDLStrategySetting") } @@ -224,7 +217,7 @@ func NewOnlineDDL(keyspace string, table string, sql string, ddlStrategySetting sql = fmt.Sprintf("revert vitess_migration '%s'", uuid) } - stmt, err := sqlparser.Parse(sql) + stmt, err := parser.Parse(sql) if err != nil { isLegacyRevertStatement := false // query validation and rebuilding @@ -347,9 +340,9 @@ func (onlineDDL *OnlineDDL) ToJSON() ([]byte, error) { } // sqlWithoutComments returns the SQL statement without comment directives. 
Useful for tests -func (onlineDDL *OnlineDDL) sqlWithoutComments() (sql string, err error) { +func (onlineDDL *OnlineDDL) sqlWithoutComments(parser *sqlparser.Parser) (sql string, err error) { sql = onlineDDL.SQL - stmt, err := sqlparser.Parse(sql) + stmt, err := parser.Parse(sql) if err != nil { // query validation and rebuilding if _, err := legacyParseRevertUUID(sql); err == nil { @@ -373,18 +366,18 @@ func (onlineDDL *OnlineDDL) sqlWithoutComments() (sql string, err error) { } // GetAction extracts the DDL action type from the online DDL statement -func (onlineDDL *OnlineDDL) GetAction() (action sqlparser.DDLAction, err error) { - if _, err := onlineDDL.GetRevertUUID(); err == nil { +func (onlineDDL *OnlineDDL) GetAction(parser *sqlparser.Parser) (action sqlparser.DDLAction, err error) { + if _, err := onlineDDL.GetRevertUUID(parser); err == nil { return sqlparser.RevertDDLAction, nil } - _, action, err = ParseOnlineDDLStatement(onlineDDL.SQL) + _, action, err = ParseOnlineDDLStatement(onlineDDL.SQL, parser) return action, err } // IsView returns 'true' when the statement affects a VIEW -func (onlineDDL *OnlineDDL) IsView() bool { - stmt, _, err := ParseOnlineDDLStatement(onlineDDL.SQL) +func (onlineDDL *OnlineDDL) IsView(parser *sqlparser.Parser) bool { + stmt, _, err := ParseOnlineDDLStatement(onlineDDL.SQL, parser) if err != nil { return false } @@ -396,8 +389,8 @@ func (onlineDDL *OnlineDDL) IsView() bool { } // GetActionStr returns a string representation of the DDL action -func (onlineDDL *OnlineDDL) GetActionStr() (action sqlparser.DDLAction, actionStr string, err error) { - action, err = onlineDDL.GetAction() +func (onlineDDL *OnlineDDL) GetActionStr(parser *sqlparser.Parser) (action sqlparser.DDLAction, actionStr string, err error) { + action, err = onlineDDL.GetAction(parser) if err != nil { return action, actionStr, err } @@ -417,11 +410,11 @@ func (onlineDDL *OnlineDDL) GetActionStr() (action sqlparser.DDLAction, actionSt // GetRevertUUID works when 
this migration is a revert for another migration. It returns the UUID // fo the reverted migration. // The function returns error when this is not a revert migration. -func (onlineDDL *OnlineDDL) GetRevertUUID() (uuid string, err error) { +func (onlineDDL *OnlineDDL) GetRevertUUID(parser *sqlparser.Parser) (uuid string, err error) { if uuid, err := legacyParseRevertUUID(onlineDDL.SQL); err == nil { return uuid, nil } - if stmt, err := sqlparser.Parse(onlineDDL.SQL); err == nil { + if stmt, err := parser.Parse(onlineDDL.SQL); err == nil { if revert, ok := stmt.(*sqlparser.RevertMigration); ok { return revert.UUID, nil } diff --git a/go/vt/schema/online_ddl_test.go b/go/vt/schema/online_ddl_test.go index c616d64a698..942b9a4274e 100644 --- a/go/vt/schema/online_ddl_test.go +++ b/go/vt/schema/online_ddl_test.go @@ -52,10 +52,11 @@ func TestIsOnlineDDLUUID(t *testing.T) { } func TestGetGCUUID(t *testing.T) { + parser := sqlparser.NewTestParser() uuids := map[string]bool{} count := 20 for i := 0; i < count; i++ { - onlineDDL, err := NewOnlineDDL("ks", "tbl", "alter table t drop column c", NewDDLStrategySetting(DDLStrategyDirect, ""), "", "") + onlineDDL, err := NewOnlineDDL("ks", "tbl", "alter table t drop column c", NewDDLStrategySetting(DDLStrategyDirect, ""), "", "", parser) assert.NoError(t, err) gcUUID := onlineDDL.GetGCUUID() assert.True(t, IsGCUUID(gcUUID)) @@ -86,10 +87,11 @@ func TestGetActionStr(t *testing.T) { isError: true, }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.statement, func(t *testing.T) { onlineDDL := &OnlineDDL{SQL: ts.statement} - _, actionStr, err := onlineDDL.GetActionStr() + _, actionStr, err := onlineDDL.GetActionStr(parser) if ts.isError { assert.Error(t, err) } else { @@ -147,10 +149,11 @@ func TestGetRevertUUID(t *testing.T) { isError: true, }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.statement, func(t *testing.T) { onlineDDL := &OnlineDDL{SQL: ts.statement} - uuid, err := 
onlineDDL.GetRevertUUID() + uuid, err := onlineDDL.GetRevertUUID(parser) if ts.isError { assert.Error(t, err) return @@ -162,10 +165,10 @@ func TestGetRevertUUID(t *testing.T) { migrationContext := "354b-11eb-82cd-f875a4d24e90" for _, ts := range tt { t.Run(ts.statement, func(t *testing.T) { - onlineDDL, err := NewOnlineDDL("test_ks", "t", ts.statement, NewDDLStrategySetting(DDLStrategyOnline, ""), migrationContext, "") + onlineDDL, err := NewOnlineDDL("test_ks", "t", ts.statement, NewDDLStrategySetting(DDLStrategyOnline, ""), migrationContext, "", parser) assert.NoError(t, err) require.NotNil(t, onlineDDL) - uuid, err := onlineDDL.GetRevertUUID() + uuid, err := onlineDDL.GetRevertUUID(parser) if ts.isError { assert.Error(t, err) return @@ -209,11 +212,12 @@ func TestNewOnlineDDL(t *testing.T) { NewDDLStrategySetting(DDLStrategyOnline, "-singleton"), } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.sql, func(t *testing.T) { for _, stgy := range strategies { t.Run(stgy.ToString(), func(t *testing.T) { - onlineDDL, err := NewOnlineDDL("test_ks", "t", ts.sql, stgy, migrationContext, "") + onlineDDL, err := NewOnlineDDL("test_ks", "t", ts.sql, stgy, migrationContext, "", parser) if ts.isError { assert.Error(t, err) return @@ -231,19 +235,20 @@ func TestNewOnlineDDL(t *testing.T) { t.Run("explicit UUID", func(t *testing.T) { var err error var onlineDDL *OnlineDDL + parser := sqlparser.NewTestParser() - onlineDDL, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "") + onlineDDL, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "", parser) assert.NoError(t, err) assert.True(t, IsOnlineDDLUUID(onlineDDL.UUID)) - _, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyOnline, ""), migrationContext, "abc") + _, err = NewOnlineDDL("test_ks", "t", 
"alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyOnline, ""), migrationContext, "abc", parser) assert.Error(t, err) - onlineDDL, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "4e5dcf80_354b_11eb_82cd_f875a4d24e90") + onlineDDL, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "4e5dcf80_354b_11eb_82cd_f875a4d24e90", parser) assert.NoError(t, err) assert.Equal(t, "4e5dcf80_354b_11eb_82cd_f875a4d24e90", onlineDDL.UUID) - _, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, " 4e5dcf80_354b_11eb_82cd_f875a4d24e90") + _, err = NewOnlineDDL("test_ks", "t", "alter table t engine=innodb", NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, " 4e5dcf80_354b_11eb_82cd_f875a4d24e90", parser) assert.Error(t, err) }) } @@ -284,9 +289,10 @@ func TestNewOnlineDDLs(t *testing.T) { "CREATE TABLE if not exists t (id bigint unsigned NOT NULL AUTO_INCREMENT, ts datetime(6) DEFAULT NULL, error_column NO_SUCH_TYPE NOT NULL, PRIMARY KEY (id)) ENGINE=InnoDB": {isError: true, expectErrorText: "near"}, } migrationContext := "354b-11eb-82cd-f875a4d24e90" + parser := sqlparser.NewTestParser() for query, expect := range tests { t.Run(query, func(t *testing.T) { - stmt, err := sqlparser.Parse(query) + stmt, err := parser.Parse(query) if expect.parseError { assert.Error(t, err) return @@ -299,7 +305,7 @@ func TestNewOnlineDDLs(t *testing.T) { } assert.True(t, ok) - onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "") + onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, ""), migrationContext, "", parser) if expect.isError { assert.Error(t, err) assert.Contains(t, err.Error(), expect.expectErrorText) @@ 
-309,12 +315,12 @@ func TestNewOnlineDDLs(t *testing.T) { sqls := []string{} for _, onlineDDL := range onlineDDLs { - sql, err := onlineDDL.sqlWithoutComments() + sql, err := onlineDDL.sqlWithoutComments(parser) assert.NoError(t, err) sql = strings.ReplaceAll(sql, "\n", "") sql = strings.ReplaceAll(sql, "\t", "") sqls = append(sqls, sql) - assert.Equal(t, expect.isView, onlineDDL.IsView()) + assert.Equal(t, expect.isView, onlineDDL.IsView(parser)) } assert.Equal(t, expect.sqls, sqls) }) @@ -328,12 +334,13 @@ func TestNewOnlineDDLsForeignKeys(t *testing.T) { } migrationContext := "354b-11eb-82cd-f875a4d24e90" + parser := sqlparser.NewTestParser() for _, query := range queries { t.Run(query, func(t *testing.T) { for _, allowForeignKeys := range []bool{false, true} { testName := fmt.Sprintf("%t", allowForeignKeys) t.Run(testName, func(t *testing.T) { - stmt, err := sqlparser.Parse(query) + stmt, err := parser.Parse(query) require.NoError(t, err) ddlStmt, ok := stmt.(sqlparser.DDLStatement) require.True(t, ok) @@ -342,7 +349,7 @@ func TestNewOnlineDDLsForeignKeys(t *testing.T) { if allowForeignKeys { flags = "--unsafe-allow-foreign-keys" } - onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, flags), migrationContext, "") + onlineDDLs, err := NewOnlineDDLs("test_ks", query, ddlStmt, NewDDLStrategySetting(DDLStrategyVitess, flags), migrationContext, "", parser) if allowForeignKeys { assert.NoError(t, err) } else { @@ -351,7 +358,7 @@ func TestNewOnlineDDLsForeignKeys(t *testing.T) { } for _, onlineDDL := range onlineDDLs { - sql, err := onlineDDL.sqlWithoutComments() + sql, err := onlineDDL.sqlWithoutComments(parser) assert.NoError(t, err) assert.NotEmpty(t, sql) } @@ -373,12 +380,13 @@ func TestOnlineDDLFromCommentedStatement(t *testing.T) { } strategySetting := NewDDLStrategySetting(DDLStrategyGhost, `-singleton -declarative --max-load="Threads_running=5"`) migrationContext := "354b-11eb-82cd-f875a4d24e90" + parser := 
sqlparser.NewTestParser() for _, query := range queries { t.Run(query, func(t *testing.T) { - o1, err := NewOnlineDDL("ks", "t", query, strategySetting, migrationContext, "") + o1, err := NewOnlineDDL("ks", "t", query, strategySetting, migrationContext, "", parser) require.NoError(t, err) - stmt, err := sqlparser.Parse(o1.SQL) + stmt, err := parser.Parse(o1.SQL) require.NoError(t, err) o2, err := OnlineDDLFromCommentedStatement(stmt) diff --git a/go/vt/schemadiff/diff.go b/go/vt/schemadiff/diff.go index fce1e5e99db..b46a7d23cc6 100644 --- a/go/vt/schemadiff/diff.go +++ b/go/vt/schemadiff/diff.go @@ -27,11 +27,11 @@ func AllSubsequent(diff EntityDiff) (diffs []EntityDiff) { // DiffCreateTablesQueries compares two `CREATE TABLE ...` queries (in string form) and returns the diff from table1 to table2. // Either or both of the queries can be empty. Based on this, the diff could be // nil, CreateTable, DropTable or AlterTable -func DiffCreateTablesQueries(query1 string, query2 string, hints *DiffHints) (EntityDiff, error) { +func DiffCreateTablesQueries(query1 string, query2 string, hints *DiffHints, parser *sqlparser.Parser) (EntityDiff, error) { var fromCreateTable *sqlparser.CreateTable var ok bool if query1 != "" { - stmt, err := sqlparser.ParseStrictDDL(query1) + stmt, err := parser.ParseStrictDDL(query1) if err != nil { return nil, err } @@ -42,7 +42,7 @@ func DiffCreateTablesQueries(query1 string, query2 string, hints *DiffHints) (En } var toCreateTable *sqlparser.CreateTable if query2 != "" { - stmt, err := sqlparser.ParseStrictDDL(query2) + stmt, err := parser.ParseStrictDDL(query2) if err != nil { return nil, err } @@ -89,11 +89,11 @@ func DiffTables(create1 *sqlparser.CreateTable, create2 *sqlparser.CreateTable, // DiffCreateViewsQueries compares two `CREATE TABLE ...` queries (in string form) and returns the diff from table1 to table2. // Either or both of the queries can be empty. 
Based on this, the diff could be // nil, CreateView, DropView or AlterView -func DiffCreateViewsQueries(query1 string, query2 string, hints *DiffHints) (EntityDiff, error) { +func DiffCreateViewsQueries(query1 string, query2 string, hints *DiffHints, parser *sqlparser.Parser) (EntityDiff, error) { var fromCreateView *sqlparser.CreateView var ok bool if query1 != "" { - stmt, err := sqlparser.ParseStrictDDL(query1) + stmt, err := parser.ParseStrictDDL(query1) if err != nil { return nil, err } @@ -104,7 +104,7 @@ func DiffCreateViewsQueries(query1 string, query2 string, hints *DiffHints) (Ent } var toCreateView *sqlparser.CreateView if query2 != "" { - stmt, err := sqlparser.ParseStrictDDL(query2) + stmt, err := parser.ParseStrictDDL(query2) if err != nil { return nil, err } @@ -151,12 +151,12 @@ func DiffViews(create1 *sqlparser.CreateView, create2 *sqlparser.CreateView, hin // DiffSchemasSQL compares two schemas and returns the rich diff that turns // 1st schema into 2nd. Schemas are build from SQL, each of which can contain an arbitrary number of // CREATE TABLE and CREATE VIEW statements. 
-func DiffSchemasSQL(sql1 string, sql2 string, hints *DiffHints) (*SchemaDiff, error) { - schema1, err := NewSchemaFromSQL(sql1) +func DiffSchemasSQL(sql1 string, sql2 string, hints *DiffHints, parser *sqlparser.Parser) (*SchemaDiff, error) { + schema1, err := NewSchemaFromSQL(sql1, parser) if err != nil { return nil, err } - schema2, err := NewSchemaFromSQL(sql2) + schema2, err := NewSchemaFromSQL(sql2, parser) if err != nil { return nil, err } diff --git a/go/vt/schemadiff/diff_test.go b/go/vt/schemadiff/diff_test.go index d2a170f4752..231cb4a352b 100644 --- a/go/vt/schemadiff/diff_test.go +++ b/go/vt/schemadiff/diff_test.go @@ -190,6 +190,7 @@ func TestDiffTables(t *testing.T) { }, }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { var fromCreateTable *sqlparser.CreateTable @@ -198,7 +199,7 @@ func TestDiffTables(t *testing.T) { hints = ts.hints } if ts.from != "" { - fromStmt, err := sqlparser.ParseStrictDDL(ts.from) + fromStmt, err := parser.ParseStrictDDL(ts.from) assert.NoError(t, err) var ok bool fromCreateTable, ok = fromStmt.(*sqlparser.CreateTable) @@ -206,7 +207,7 @@ func TestDiffTables(t *testing.T) { } var toCreateTable *sqlparser.CreateTable if ts.to != "" { - toStmt, err := sqlparser.ParseStrictDDL(ts.to) + toStmt, err := parser.ParseStrictDDL(ts.to) assert.NoError(t, err) var ok bool toCreateTable, ok = toStmt.(*sqlparser.CreateTable) @@ -218,7 +219,7 @@ func TestDiffTables(t *testing.T) { // Technically, DiffCreateTablesQueries calls DiffTables, // but we expose both to users of this library. so we want to make sure // both work as expected irrespective of any relationship between them. 
- dq, dqerr := DiffCreateTablesQueries(ts.from, ts.to, hints) + dq, dqerr := DiffCreateTablesQueries(ts.from, ts.to, hints, sqlparser.NewTestParser()) d, err := DiffTables(fromCreateTable, toCreateTable, hints) switch { case ts.isError: @@ -241,7 +242,7 @@ func TestDiffTables(t *testing.T) { assert.Equal(t, ts.action, action) // validate we can parse back the statement - _, err = sqlparser.ParseStrictDDL(diff) + _, err = parser.ParseStrictDDL(diff) assert.NoError(t, err) eFrom, eTo := d.Entities() @@ -260,7 +261,7 @@ func TestDiffTables(t *testing.T) { assert.Equal(t, ts.action, action) // validate we can parse back the statement - _, err = sqlparser.ParseStrictDDL(canonicalDiff) + _, err = parser.ParseStrictDDL(canonicalDiff) assert.NoError(t, err) } // let's also check dq, and also validate that dq's statement is identical to d's @@ -322,11 +323,12 @@ func TestDiffViews(t *testing.T) { }, } hints := &DiffHints{} + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { var fromCreateView *sqlparser.CreateView if ts.from != "" { - fromStmt, err := sqlparser.ParseStrictDDL(ts.from) + fromStmt, err := parser.ParseStrictDDL(ts.from) assert.NoError(t, err) var ok bool fromCreateView, ok = fromStmt.(*sqlparser.CreateView) @@ -334,7 +336,7 @@ func TestDiffViews(t *testing.T) { } var toCreateView *sqlparser.CreateView if ts.to != "" { - toStmt, err := sqlparser.ParseStrictDDL(ts.to) + toStmt, err := parser.ParseStrictDDL(ts.to) assert.NoError(t, err) var ok bool toCreateView, ok = toStmt.(*sqlparser.CreateView) @@ -346,7 +348,7 @@ func TestDiffViews(t *testing.T) { // Technically, DiffCreateTablesQueries calls DiffTables, // but we expose both to users of this library. so we want to make sure // both work as expected irrespective of any relationship between them. 
- dq, dqerr := DiffCreateViewsQueries(ts.from, ts.to, hints) + dq, dqerr := DiffCreateViewsQueries(ts.from, ts.to, hints, parser) d, err := DiffViews(fromCreateView, toCreateView, hints) switch { case ts.isError: @@ -369,7 +371,7 @@ func TestDiffViews(t *testing.T) { assert.Equal(t, ts.action, action) // validate we can parse back the statement - _, err = sqlparser.ParseStrictDDL(diff) + _, err = parser.ParseStrictDDL(diff) assert.NoError(t, err) eFrom, eTo := d.Entities() @@ -388,7 +390,7 @@ func TestDiffViews(t *testing.T) { assert.Equal(t, ts.action, action) // validate we can parse back the statement - _, err = sqlparser.ParseStrictDDL(canonicalDiff) + _, err = parser.ParseStrictDDL(canonicalDiff) assert.NoError(t, err) } @@ -796,12 +798,13 @@ func TestDiffSchemas(t *testing.T) { }, }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { hints := &DiffHints{ TableRenameStrategy: ts.tableRename, } - diff, err := DiffSchemasSQL(ts.from, ts.to, hints) + diff, err := DiffSchemasSQL(ts.from, ts.to, hints, parser) if ts.expectError != "" { require.Error(t, err) assert.Contains(t, err.Error(), ts.expectError) @@ -827,21 +830,21 @@ func TestDiffSchemas(t *testing.T) { // validate we can parse back the diff statements for _, s := range statements { - _, err := sqlparser.ParseStrictDDL(s) + _, err := parser.ParseStrictDDL(s) assert.NoError(t, err) } for _, s := range cstatements { - _, err := sqlparser.ParseStrictDDL(s) + _, err := parser.ParseStrictDDL(s) assert.NoError(t, err) } { // Validate "apply()" on "from" converges with "to" - schema1, err := NewSchemaFromSQL(ts.from) + schema1, err := NewSchemaFromSQL(ts.from, parser) require.NoError(t, err) schema1SQL := schema1.ToSQL() - schema2, err := NewSchemaFromSQL(ts.to) + schema2, err := NewSchemaFromSQL(ts.to, parser) require.NoError(t, err) applied, err := schema1.Apply(diffs) require.NoError(t, err) @@ -892,12 +895,13 @@ func TestSchemaApplyError(t *testing.T) { }, } 
hints := &DiffHints{} + parser := sqlparser.NewTestParser() for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { // Validate "apply()" on "from" converges with "to" - schema1, err := NewSchemaFromSQL(ts.from) + schema1, err := NewSchemaFromSQL(ts.from, parser) assert.NoError(t, err) - schema2, err := NewSchemaFromSQL(ts.to) + schema2, err := NewSchemaFromSQL(ts.to, parser) assert.NoError(t, err) { diff --git a/go/vt/schemadiff/schema.go b/go/vt/schemadiff/schema.go index ec9174dd232..5e776dbd3b3 100644 --- a/go/vt/schemadiff/schema.go +++ b/go/vt/schemadiff/schema.go @@ -98,10 +98,10 @@ func NewSchemaFromStatements(statements []sqlparser.Statement) (*Schema, error) } // NewSchemaFromQueries creates a valid and normalized schema based on list of queries -func NewSchemaFromQueries(queries []string) (*Schema, error) { +func NewSchemaFromQueries(queries []string, parser *sqlparser.Parser) (*Schema, error) { statements := make([]sqlparser.Statement, 0, len(queries)) for _, q := range queries { - stmt, err := sqlparser.ParseStrictDDL(q) + stmt, err := parser.ParseStrictDDL(q) if err != nil { return nil, err } @@ -112,9 +112,9 @@ func NewSchemaFromQueries(queries []string) (*Schema, error) { // NewSchemaFromSQL creates a valid and normalized schema based on a SQL blob that contains // CREATE statements for various objects (tables, views) -func NewSchemaFromSQL(sql string) (*Schema, error) { +func NewSchemaFromSQL(sql string, parser *sqlparser.Parser) (*Schema, error) { var statements []sqlparser.Statement - tokenizer := sqlparser.NewStringTokenizer(sql) + tokenizer := parser.NewStringTokenizer(sql) for { stmt, err := sqlparser.ParseNextStrictDDL(tokenizer) if err != nil { @@ -1041,10 +1041,8 @@ func (s *Schema) getTableColumnNames(t *CreateTableEntity) (columnNames []*sqlpa } // getViewColumnNames returns the names of aliased columns returned by a given view. 
-func (s *Schema) getViewColumnNames(v *CreateViewEntity, schemaInformation *declarativeSchemaInformation) ( - columnNames []*sqlparser.IdentifierCI, - err error, -) { +func (s *Schema) getViewColumnNames(v *CreateViewEntity, schemaInformation *declarativeSchemaInformation) ([]*sqlparser.IdentifierCI, error) { + var columnNames []*sqlparser.IdentifierCI for _, node := range v.Select.GetColumns() { switch node := node.(type) { case *sqlparser.StarExpr: @@ -1074,8 +1072,5 @@ func (s *Schema) getViewColumnNames(v *CreateViewEntity, schemaInformation *decl } } - if err != nil { - return nil, err - } return columnNames, nil } diff --git a/go/vt/schemadiff/schema_diff_test.go b/go/vt/schemadiff/schema_diff_test.go index df7d893356f..c41ee8e7839 100644 --- a/go/vt/schemadiff/schema_diff_test.go +++ b/go/vt/schemadiff/schema_diff_test.go @@ -23,6 +23,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" ) func TestPermutations(t *testing.T) { @@ -163,11 +165,11 @@ func TestPermutations(t *testing.T) { for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { - fromSchema, err := NewSchemaFromQueries(tc.fromQueries) + fromSchema, err := NewSchemaFromQueries(tc.fromQueries, sqlparser.NewTestParser()) require.NoError(t, err) require.NotNil(t, fromSchema) - toSchema, err := NewSchemaFromQueries(tc.toQueries) + toSchema, err := NewSchemaFromQueries(tc.toQueries, sqlparser.NewTestParser()) require.NoError(t, err) require.NotNil(t, toSchema) @@ -842,11 +844,11 @@ func TestSchemaDiff(t *testing.T) { if tc.fromQueries == nil { tc.fromQueries = createQueries } - fromSchema, err := NewSchemaFromQueries(tc.fromQueries) + fromSchema, err := NewSchemaFromQueries(tc.fromQueries, sqlparser.NewTestParser()) require.NoError(t, err) require.NotNil(t, fromSchema) - toSchema, err := NewSchemaFromQueries(tc.toQueries) + toSchema, err := NewSchemaFromQueries(tc.toQueries, sqlparser.NewTestParser()) require.NoError(t, 
err) require.NotNil(t, toSchema) diff --git a/go/vt/schemadiff/schema_test.go b/go/vt/schemadiff/schema_test.go index a1b55544593..05bf6c12df6 100644 --- a/go/vt/schemadiff/schema_test.go +++ b/go/vt/schemadiff/schema_test.go @@ -84,7 +84,7 @@ var schemaTestExpectSortedViewNames = []string{ var schemaTestToSQL = "CREATE TABLE `t1` (\n\t`id` int\n);\nCREATE TABLE `t2` (\n\t`id` int\n);\nCREATE TABLE `t3` (\n\t`id` int,\n\t`type` enum('foo', 'bar') NOT NULL DEFAULT 'foo'\n);\nCREATE TABLE `t5` (\n\t`id` int\n);\nCREATE VIEW `v0` AS SELECT 1 FROM `dual`;\nCREATE VIEW `v3` AS SELECT *, `id` + 1 AS `id_plus`, `id` + 2 FROM `t3` AS `t3`;\nCREATE VIEW `v9` AS SELECT 1 FROM `dual`;\nCREATE VIEW `v1` AS SELECT * FROM `v3`;\nCREATE VIEW `v2` AS SELECT * FROM `v3`, `t2`;\nCREATE VIEW `v4` AS SELECT * FROM `t2` AS `something_else`, `v3`;\nCREATE VIEW `v5` AS SELECT * FROM `t1`, (SELECT * FROM `v3`) AS `some_alias`;\nCREATE VIEW `v6` AS SELECT * FROM `v4`;\n" func TestNewSchemaFromQueries(t *testing.T) { - schema, err := NewSchemaFromQueries(schemaTestCreateQueries) + schema, err := NewSchemaFromQueries(schemaTestCreateQueries, sqlparser.NewTestParser()) assert.NoError(t, err) require.NotNil(t, schema) @@ -94,7 +94,7 @@ func TestNewSchemaFromQueries(t *testing.T) { } func TestNewSchemaFromSQL(t *testing.T) { - schema, err := NewSchemaFromSQL(strings.Join(schemaTestCreateQueries, ";")) + schema, err := NewSchemaFromSQL(strings.Join(schemaTestCreateQueries, ";"), sqlparser.NewTestParser()) assert.NoError(t, err) require.NotNil(t, schema) @@ -108,7 +108,7 @@ func TestNewSchemaFromQueriesWithDuplicate(t *testing.T) { queries := append(schemaTestCreateQueries, "create view v2 as select * from v1, t2", ) - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) assert.Error(t, err) assert.EqualError(t, err, (&ApplyDuplicateEntityError{Entity: "v2"}).Error()) } @@ -118,7 +118,7 @@ func TestNewSchemaFromQueriesUnresolved(t 
*testing.T) { queries := append(schemaTestCreateQueries, "create view v7 as select * from v8, t2", ) - schema, err := NewSchemaFromQueries(queries) + schema, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) assert.Error(t, err) assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v7"}).Error()) v := schema.sorted[len(schema.sorted)-1] @@ -131,7 +131,7 @@ func TestNewSchemaFromQueriesUnresolvedAlias(t *testing.T) { queries := append(schemaTestCreateQueries, "create view v7 as select * from something_else as t1, t2", ) - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) assert.Error(t, err) assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v7"}).Error()) } @@ -141,7 +141,7 @@ func TestNewSchemaFromQueriesViewFromDual(t *testing.T) { queries := []string{ "create view v20 as select 1 from dual", } - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) assert.NoError(t, err) } @@ -150,7 +150,7 @@ func TestNewSchemaFromQueriesViewFromDualImplicit(t *testing.T) { queries := []string{ "create view v20 as select 1", } - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) assert.NoError(t, err) } @@ -160,14 +160,14 @@ func TestNewSchemaFromQueriesLoop(t *testing.T) { "create view v7 as select * from v8, t2", "create view v8 as select * from t1, v7", ) - _, err := NewSchemaFromQueries(queries) + _, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) require.Error(t, err) err = vterrors.UnwrapFirst(err) assert.EqualError(t, err, (&ViewDependencyUnresolvedError{View: "v7"}).Error()) } func TestToSQL(t *testing.T) { - schema, err := NewSchemaFromQueries(schemaTestCreateQueries) + schema, err := NewSchemaFromQueries(schemaTestCreateQueries, sqlparser.NewTestParser()) assert.NoError(t, err) require.NotNil(t, schema) @@ -176,7 +176,7 @@ func TestToSQL(t 
*testing.T) { } func TestCopy(t *testing.T) { - schema, err := NewSchemaFromQueries(schemaTestCreateQueries) + schema, err := NewSchemaFromQueries(schemaTestCreateQueries, sqlparser.NewTestParser()) assert.NoError(t, err) require.NotNil(t, schema) @@ -223,7 +223,7 @@ func TestGetViewDependentTableNames(t *testing.T) { } for _, ts := range tt { t.Run(ts.view, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.view) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.view) require.NoError(t, err) createView, ok := stmt.(*sqlparser.CreateView) require.True(t, ok) @@ -263,7 +263,7 @@ func TestGetForeignKeyParentTableNames(t *testing.T) { } for _, ts := range tt { t.Run(ts.table, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.table) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.table) require.NoError(t, err) createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -299,7 +299,7 @@ func TestTableForeignKeyOrdering(t *testing.T) { "v13", "v09", } - schema, err := NewSchemaFromQueries(fkQueries) + schema, err := NewSchemaFromQueries(fkQueries, sqlparser.NewTestParser()) require.NoError(t, err) assert.NotNil(t, schema) @@ -407,7 +407,7 @@ func TestInvalidSchema(t *testing.T) { for _, ts := range tt { t.Run(ts.schema, func(t *testing.T) { - _, err := NewSchemaFromSQL(ts.schema) + _, err := NewSchemaFromSQL(ts.schema, sqlparser.NewTestParser()) if ts.expectErr == nil { assert.NoError(t, err) } else { @@ -425,7 +425,7 @@ func TestInvalidTableForeignKeyReference(t *testing.T) { "create table t11 (id int primary key, i int, constraint f12 foreign key (i) references t12(id) on delete restrict)", "create table t15(id int, primary key(id))", } - s, err := NewSchemaFromQueries(fkQueries) + s, err := NewSchemaFromQueries(fkQueries, sqlparser.NewTestParser()) assert.Error(t, err) // Even though there's errors, we still expect the schema to have been created. 
assert.NotNil(t, s) @@ -443,7 +443,7 @@ func TestInvalidTableForeignKeyReference(t *testing.T) { "create table t11 (id int primary key, i int, constraint f12 foreign key (i) references t12(id) on delete restrict)", "create table t12 (id int primary key, i int, constraint f13 foreign key (i) references t13(id) on delete restrict)", } - _, err := NewSchemaFromQueries(fkQueries) + _, err := NewSchemaFromQueries(fkQueries, sqlparser.NewTestParser()) assert.Error(t, err) assert.ErrorContains(t, err, (&ForeignKeyDependencyUnresolvedError{Table: "t11"}).Error()) assert.ErrorContains(t, err, (&ForeignKeyDependencyUnresolvedError{Table: "t12"}).Error()) @@ -468,7 +468,7 @@ func TestGetEntityColumnNames(t *testing.T) { "create view vb as select *, now() from v8", } - schema, err := NewSchemaFromQueries(queries) + schema, err := NewSchemaFromQueries(queries, sqlparser.NewTestParser()) require.NoError(t, err) require.NotNil(t, schema) @@ -746,7 +746,7 @@ func TestViewReferences(t *testing.T) { } for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - schema, err := NewSchemaFromQueries(ts.queries) + schema, err := NewSchemaFromQueries(ts.queries, sqlparser.NewTestParser()) if ts.expectErr == nil { require.NoError(t, err) require.NotNil(t, schema) @@ -838,9 +838,9 @@ func TestMassiveSchema(t *testing.T) { queries1 = append(queries1, query) tableNames[tableName] = true } - schema0, err = NewSchemaFromQueries(queries0) + schema0, err = NewSchemaFromQueries(queries0, sqlparser.NewTestParser()) require.NoError(t, err) - schema1, err = NewSchemaFromQueries(queries1) + schema1, err = NewSchemaFromQueries(queries1, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, countModifiedTables, modifyTables) diff --git a/go/vt/schemadiff/semantics.go b/go/vt/schemadiff/semantics.go index ee7ef4e3b1a..ca4b57c62e1 100644 --- a/go/vt/schemadiff/semantics.go +++ b/go/vt/schemadiff/semantics.go @@ -53,7 +53,11 @@ func (si *declarativeSchemaInformation) 
FindTableOrVindex(tablename sqlparser.Ta } func (si *declarativeSchemaInformation) ConnCollation() collations.ID { - return 45 + return collations.CollationUtf8mb4ID +} + +func (si *declarativeSchemaInformation) CollationEnv() *collations.Environment { + return collations.MySQL8() } func (si *declarativeSchemaInformation) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) { diff --git a/go/vt/schemadiff/table_test.go b/go/vt/schemadiff/table_test.go index e2ef58c1a6f..5e159ffca99 100644 --- a/go/vt/schemadiff/table_test.go +++ b/go/vt/schemadiff/table_test.go @@ -1267,12 +1267,12 @@ func TestCreateTableDiff(t *testing.T) { standardHints := DiffHints{} for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - fromStmt, err := sqlparser.ParseStrictDDL(ts.from) + fromStmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.from) require.NoError(t, err) fromCreateTable, ok := fromStmt.(*sqlparser.CreateTable) require.True(t, ok) - toStmt, err := sqlparser.ParseStrictDDL(ts.to) + toStmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.to) require.NoError(t, err) toCreateTable, ok := toStmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -1332,7 +1332,7 @@ func TestCreateTableDiff(t *testing.T) { } } // validate we can parse back the statement - _, err := sqlparser.ParseStrictDDL(diff) + _, err := sqlparser.NewTestParser().ParseStrictDDL(diff) assert.NoError(t, err) // Validate "from/to" entities @@ -1362,7 +1362,7 @@ func TestCreateTableDiff(t *testing.T) { { cdiff := alter.CanonicalStatementString() assert.Equal(t, ts.cdiff, cdiff) - _, err := sqlparser.ParseStrictDDL(cdiff) + _, err := sqlparser.NewTestParser().ParseStrictDDL(cdiff) assert.NoError(t, err) } @@ -1859,12 +1859,12 @@ func TestValidate(t *testing.T) { hints := DiffHints{} for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.from) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.from) require.NoError(t, err) 
fromCreateTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) - stmt, err = sqlparser.ParseStrictDDL(ts.alter) + stmt, err = sqlparser.NewTestParser().ParseStrictDDL(ts.alter) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) @@ -1888,7 +1888,7 @@ func TestValidate(t *testing.T) { require.True(t, ok) applied = c.normalize() - stmt, err := sqlparser.ParseStrictDDL(ts.to) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.to) require.NoError(t, err) toCreateTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -2172,7 +2172,7 @@ func TestNormalize(t *testing.T) { } for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.from) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.from) require.NoError(t, err) fromCreateTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -2261,7 +2261,7 @@ func TestIndexesCoveringForeignKeyColumns(t *testing.T) { }, } - stmt, err := sqlparser.ParseStrictDDL(sql) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(sql) require.NoError(t, err) createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) diff --git a/go/vt/schemadiff/view_test.go b/go/vt/schemadiff/view_test.go index 939308d056c..d32739d7190 100644 --- a/go/vt/schemadiff/view_test.go +++ b/go/vt/schemadiff/view_test.go @@ -148,12 +148,12 @@ func TestCreateViewDiff(t *testing.T) { hints := &DiffHints{} for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - fromStmt, err := sqlparser.ParseStrictDDL(ts.from) + fromStmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.from) assert.NoError(t, err) fromCreateView, ok := fromStmt.(*sqlparser.CreateView) assert.True(t, ok) - toStmt, err := sqlparser.ParseStrictDDL(ts.to) + toStmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.to) assert.NoError(t, err) toCreateView, ok := toStmt.(*sqlparser.CreateView) assert.True(t, ok) @@ -177,7 +177,7 @@ func TestCreateViewDiff(t 
*testing.T) { diff := alter.StatementString() assert.Equal(t, ts.diff, diff) // validate we can parse back the statement - _, err := sqlparser.ParseStrictDDL(diff) + _, err := sqlparser.NewTestParser().ParseStrictDDL(diff) assert.NoError(t, err) eFrom, eTo := alter.Entities() @@ -199,7 +199,7 @@ func TestCreateViewDiff(t *testing.T) { { cdiff := alter.CanonicalStatementString() assert.Equal(t, ts.cdiff, cdiff) - _, err := sqlparser.ParseStrictDDL(cdiff) + _, err := sqlparser.NewTestParser().ParseStrictDDL(cdiff) assert.NoError(t, err) } } @@ -241,7 +241,7 @@ func TestNormalizeView(t *testing.T) { } for _, ts := range tt { t.Run(ts.name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(ts.from) + stmt, err := sqlparser.NewTestParser().ParseStrictDDL(ts.from) require.NoError(t, err) fromCreateView, ok := stmt.(*sqlparser.CreateView) require.True(t, ok) diff --git a/go/vt/schemamanager/schemamanager_test.go b/go/vt/schemamanager/schemamanager_test.go index 154d985bba4..b4724241cd1 100644 --- a/go/vt/schemamanager/schemamanager_test.go +++ b/go/vt/schemamanager/schemamanager_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/tmutils" "vitess.io/vitess/go/vt/topo" @@ -94,7 +96,7 @@ func TestSchemaManagerExecutorOpenFail(t *testing.T) { controller := newFakeController( []string{"create table test_table (pk int);"}, false, false, false) controller.SetKeyspace("unknown_keyspace") - executor := NewTabletExecutor("TestSchemaManagerExecutorOpenFail", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestSchemaManagerExecutorOpenFail", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() _, err := Run(ctx, controller, executor) @@ -125,7 +127,7 @@ func 
TestSchemaManagerRun(t *testing.T) { }) fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) - executor := NewTabletExecutor("TestSchemaManagerRun", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestSchemaManagerRun", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() resp, err := Run(ctx, controller, executor) @@ -176,7 +178,7 @@ func TestSchemaManagerExecutorFail(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() resp, err := Run(ctx, controller, executor) @@ -196,7 +198,7 @@ func TestSchemaManagerExecutorBatchVsStrategyFail(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10, sqlparser.NewTestParser()) executor.SetDDLStrategy("online") ctx := context.Background() @@ -212,7 +214,7 @@ func TestSchemaManagerExecutorBatchVsQueriesFail(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), 
fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10, sqlparser.NewTestParser()) executor.SetDDLStrategy("direct") ctx := context.Background() @@ -228,7 +230,7 @@ func TestSchemaManagerExecutorBatchVsUUIDsFail(t *testing.T) { fakeTmc.AddSchemaDefinition("vt_test_keyspace", &tabletmanagerdatapb.SchemaDefinition{}) fakeTmc.EnableExecuteFetchAsDbaError = true - executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10) + executor := NewTabletExecutor("TestSchemaManagerExecutorFail", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 10, sqlparser.NewTestParser()) executor.SetDDLStrategy("direct") executor.SetUUIDList([]string{"4e5dcf80_354b_11eb_82cd_f875a4d24e90"}) @@ -271,7 +273,7 @@ func TestSchemaManagerRegisterControllerFactory(t *testing.T) { } func newFakeExecutor(t *testing.T) *TabletExecutor { - return NewTabletExecutor("newFakeExecutor", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + return NewTabletExecutor("newFakeExecutor", newFakeTopo(t), newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) } func newFakeTabletManagerClient() *fakeTabletManagerClient { diff --git a/go/vt/schemamanager/tablet_executor.go b/go/vt/schemamanager/tablet_executor.go index a56a95d5034..cd1691dd01e 100644 --- a/go/vt/schemamanager/tablet_executor.go +++ b/go/vt/schemamanager/tablet_executor.go @@ -53,10 +53,11 @@ type TabletExecutor struct { ddlStrategySetting *schema.DDLStrategySetting uuids []string batchSize int64 + parser *sqlparser.Parser } // NewTabletExecutor creates a new TabletExecutor instance -func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc 
tmclient.TabletManagerClient, logger logutil.Logger, waitReplicasTimeout time.Duration, batchSize int64) *TabletExecutor { +func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc tmclient.TabletManagerClient, logger logutil.Logger, waitReplicasTimeout time.Duration, batchSize int64, parser *sqlparser.Parser) *TabletExecutor { return &TabletExecutor{ ts: ts, tmc: tmc, @@ -65,6 +66,7 @@ func NewTabletExecutor(migrationContext string, ts *topo.Server, tmc tmclient.Ta waitReplicasTimeout: waitReplicasTimeout, migrationContext: migrationContext, batchSize: batchSize, + parser: parser, } } @@ -146,7 +148,7 @@ func (exec *TabletExecutor) Validate(ctx context.Context, sqls []string) error { func (exec *TabletExecutor) parseDDLs(sqls []string) error { for _, sql := range sqls { - stmt, err := sqlparser.Parse(sql) + stmt, err := exec.parser.Parse(sql) if err != nil { return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "failed to parse sql: %s, got error: %v", sql, err) } @@ -204,14 +206,14 @@ func (exec *TabletExecutor) executeSQL(ctx context.Context, sql string, provided return executeViaFetch() } // Analyze what type of query this is: - stmt, err := sqlparser.Parse(sql) + stmt, err := exec.parser.Parse(sql) if err != nil { return false, err } switch stmt := stmt.(type) { case sqlparser.DDLStatement: if exec.isOnlineSchemaDDL(stmt) { - onlineDDLs, err := schema.NewOnlineDDLs(exec.keyspace, sql, stmt, exec.ddlStrategySetting, exec.migrationContext, providedUUID) + onlineDDLs, err := schema.NewOnlineDDLs(exec.keyspace, sql, stmt, exec.ddlStrategySetting, exec.migrationContext, providedUUID, exec.parser) if err != nil { execResult.ExecutorErr = err.Error() return false, err @@ -227,7 +229,7 @@ func (exec *TabletExecutor) executeSQL(ctx context.Context, sql string, provided } case *sqlparser.RevertMigration: strategySetting := schema.NewDDLStrategySetting(schema.DDLStrategyOnline, exec.ddlStrategySetting.Options) - onlineDDL, err := 
schema.NewOnlineDDL(exec.keyspace, "", sqlparser.String(stmt), strategySetting, exec.migrationContext, providedUUID) + onlineDDL, err := schema.NewOnlineDDL(exec.keyspace, "", sqlparser.String(stmt), strategySetting, exec.migrationContext, providedUUID, exec.parser) if err != nil { execResult.ExecutorErr = err.Error() return false, err @@ -265,9 +267,9 @@ func batchSQLs(sqls []string, batchSize int) (batchedSQLs []string) { // allSQLsAreCreateQueries returns 'true' when all given queries are CREATE TABLE|VIEW // This function runs pretty fast even for thousands of tables (its overhead is insignificant compared with // the time it would take to apply the changes). -func allSQLsAreCreateQueries(sqls []string) (bool, error) { +func allSQLsAreCreateQueries(sqls []string, parser *sqlparser.Parser) (bool, error) { for _, sql := range sqls { - stmt, err := sqlparser.Parse(sql) + stmt, err := parser.Parse(sql) if err != nil { return false, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "failed to parse sql: %s, got error: %v", sql, err) } @@ -377,7 +379,7 @@ func (exec *TabletExecutor) Execute(ctx context.Context, sqls []string) *Execute if exec.hasProvidedUUIDs() { return errorExecResult(fmt.Errorf("--batch-size conflicts with --uuid-list. Batching does not support UUIDs.")) } - allSQLsAreCreate, err := allSQLsAreCreateQueries(sqls) + allSQLsAreCreate, err := allSQLsAreCreateQueries(sqls, exec.parser) if err != nil { return errorExecResult(err) } @@ -444,16 +446,16 @@ func (exec *TabletExecutor) executeOnAllTablets(ctx context.Context, execResult // applyAllowZeroInDate takes a SQL string which may contain one or more statements, // and, assuming those are DDLs, adds a /*vt+ allowZeroInDate=true */ directive to all of them, // returning the result again as one long SQL. 
-func applyAllowZeroInDate(sql string) (string, error) { +func applyAllowZeroInDate(sql string, parser *sqlparser.Parser) (string, error) { // sql may be a batch of multiple statements - sqls, err := sqlparser.SplitStatementToPieces(sql) + sqls, err := parser.SplitStatementToPieces(sql) if err != nil { return sql, err } var modifiedSqls []string for _, singleSQL := range sqls { // --allow-zero-in-date Applies to DDLs - stmt, err := sqlparser.Parse(singleSQL) + stmt, err := parser.Parse(singleSQL) if err != nil { return sql, err } @@ -486,7 +488,7 @@ func (exec *TabletExecutor) executeOneTablet( } else { if exec.ddlStrategySetting != nil && exec.ddlStrategySetting.IsAllowZeroInDateFlag() { // --allow-zero-in-date Applies to DDLs - sql, err = applyAllowZeroInDate(sql) + sql, err = applyAllowZeroInDate(sql, exec.parser) if err != nil { errChan <- ShardWithError{Shard: tablet.Shard, Err: err.Error()} return diff --git a/go/vt/schemamanager/tablet_executor_test.go b/go/vt/schemamanager/tablet_executor_test.go index 175e10dfb66..0ae960e6e9c 100644 --- a/go/vt/schemamanager/tablet_executor_test.go +++ b/go/vt/schemamanager/tablet_executor_test.go @@ -72,7 +72,7 @@ func TestTabletExecutorOpenWithEmptyPrimaryAlias(t *testing.T) { if err := ts.InitTablet(ctx, tablet, false /*allowPrimaryOverride*/, true /*createShardAndKeyspace*/, false /*allowUpdate*/); err != nil { t.Fatalf("InitTablet failed: %v", err) } - executor := NewTabletExecutor("TestTabletExecutorOpenWithEmptyPrimaryAlias", ts, newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestTabletExecutorOpenWithEmptyPrimaryAlias", ts, newFakeTabletManagerClient(), logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) if err := executor.Open(ctx, "test_keyspace"); err == nil || !strings.Contains(err.Error(), "does not have a primary") { t.Fatalf("executor.Open() = '%v', want error", err) } @@ -105,7 +105,7 @@ func 
TestTabletExecutorValidate(t *testing.T) { }, }) - executor := NewTabletExecutor("TestTabletExecutorValidate", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestTabletExecutorValidate", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() sqls := []string{ @@ -179,7 +179,7 @@ func TestTabletExecutorDML(t *testing.T) { }, }) - executor := NewTabletExecutor("TestTabletExecutorDML", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0) + executor := NewTabletExecutor("TestTabletExecutorDML", newFakeTopo(t), fakeTmc, logutil.NewConsoleLogger(), testWaitReplicasTimeout, 0, sqlparser.NewTestParser()) ctx := context.Background() executor.Open(ctx, "unsharded_keyspace") @@ -269,12 +269,13 @@ func TestIsOnlineSchemaDDL(t *testing.T) { }, } + parser := sqlparser.NewTestParser() for _, ts := range tt { e := &TabletExecutor{} err := e.SetDDLStrategy(ts.ddlStrategy) assert.NoError(t, err) - stmt, err := sqlparser.Parse(ts.query) + stmt, err := parser.Parse(ts.query) assert.NoError(t, err) ddlStmt, ok := stmt.(sqlparser.DDLStatement) @@ -402,7 +403,7 @@ func TestAllSQLsAreCreateQueries(t *testing.T) { for _, tcase := range tcases { t.Run(tcase.name, func(t *testing.T) { - result, err := allSQLsAreCreateQueries(tcase.sqls) + result, err := allSQLsAreCreateQueries(tcase.sqls, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, tcase.expect, result) }) @@ -437,7 +438,7 @@ func TestApplyAllowZeroInDate(t *testing.T) { } for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - result, err := applyAllowZeroInDate(tcase.sql) + result, err := applyAllowZeroInDate(tcase.sql, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, tcase.expect, result) }) diff --git a/go/vt/servenv/http.go b/go/vt/servenv/http.go index f4b001383d1..57cf19673ad 100644 --- 
a/go/vt/servenv/http.go +++ b/go/vt/servenv/http.go @@ -22,6 +22,9 @@ import ( "net/http" "net/http/pprof" + "github.com/spf13/pflag" + + "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv/internal/mux" ) @@ -46,6 +49,14 @@ func HTTPServe(l net.Listener) error { // HTTPRegisterProfile registers the default pprof HTTP endpoints with the internal servenv mux. func HTTPRegisterProfile() { + if !httpPprof { + return + } + + if !pflag.Lookup("pprof-http").Changed { + log.Warning("Beginning in v20, pprof-http will default to `false`; to continue enabling pprof endpoints, please manually set this flag before upgrading.") + } + HTTPHandleFunc("/debug/pprof/", pprof.Index) HTTPHandleFunc("/debug/pprof/cmdline", pprof.Cmdline) HTTPHandleFunc("/debug/pprof/profile", pprof.Profile) diff --git a/go/vt/servenv/mysql.go b/go/vt/servenv/mysql.go index 94019a1c42c..6a9b48e495b 100644 --- a/go/vt/servenv/mysql.go +++ b/go/vt/servenv/mysql.go @@ -17,13 +17,17 @@ limitations under the License. package servenv import ( + "fmt" + "github.com/spf13/pflag" + + "vitess.io/vitess/go/mysql/config" ) // mySQLServerVersion is what Vitess will present as it's version during the connection handshake, // and as the value to the @@version system variable. If nothing is provided, Vitess will report itself as // a specific MySQL version with the vitess version appended to it -var mySQLServerVersion = "8.0.30-Vitess" +var mySQLServerVersion = fmt.Sprintf("%s-Vitess", config.DefaultMySQLVersion) // RegisterMySQLServerFlags installs the flags needed to specify or expose a // particular MySQL server version from Vitess. 
@@ -51,6 +55,7 @@ func init() { "vtbackup", "vtcombo", "vtctl", + "vtctld", "vtctldclient", "vtexplain", "vtgate", diff --git a/go/vt/servenv/pprof.go b/go/vt/servenv/pprof.go index 7aff18ca05a..66d2dd4ed22 100644 --- a/go/vt/servenv/pprof.go +++ b/go/vt/servenv/pprof.go @@ -35,6 +35,7 @@ import ( var ( pprofFlag []string + httpPprof = true ) type profmode string @@ -298,6 +299,7 @@ func (prof *profile) init() (start func(), stop func()) { func init() { OnParse(func(fs *pflag.FlagSet) { + fs.BoolVar(&httpPprof, "pprof-http", httpPprof, "enable pprof http endpoints") fs.StringSliceVar(&pprofFlag, "pprof", pprofFlag, "enable profiling") }) OnInit(pprofInit) diff --git a/go/vt/servenv/truncate_query.go b/go/vt/servenv/truncate_query.go new file mode 100644 index 00000000000..fdb618c5c6a --- /dev/null +++ b/go/vt/servenv/truncate_query.go @@ -0,0 +1,34 @@ +package servenv + +import ( + "github.com/spf13/pflag" +) + +var ( + // TruncateUILen truncate queries in debug UIs to the given length. 0 means unlimited. + TruncateUILen = 512 + + // TruncateErrLen truncate queries in error logs to the given length. 0 means unlimited. 
+ TruncateErrLen = 0 +) + +func registerQueryTruncationFlags(fs *pflag.FlagSet) { + fs.IntVar(&TruncateUILen, "sql-max-length-ui", TruncateUILen, "truncate queries in debug UIs to the given length (default 512)") + fs.IntVar(&TruncateErrLen, "sql-max-length-errors", TruncateErrLen, "truncate queries in error logs to the given length (default unlimited)") +} + +func init() { + for _, cmd := range []string{ + "vtgate", + "vttablet", + "vtcombo", + "vtctld", + "vtctl", + "vtexplain", + "vtbackup", + "vttestserver", + "vtbench", + } { + OnParseFor(cmd, registerQueryTruncationFlags) + } +} diff --git a/go/vt/sidecardb/sidecardb.go b/go/vt/sidecardb/sidecardb.go index 4b8c37039d7..4f3ea2e8252 100644 --- a/go/vt/sidecardb/sidecardb.go +++ b/go/vt/sidecardb/sidecardb.go @@ -114,8 +114,8 @@ func init() { })) } -func validateSchemaDefinition(name, schema string) (string, error) { - stmt, err := sqlparser.ParseStrictDDL(schema) +func validateSchemaDefinition(name, schema string, parser *sqlparser.Parser) (string, error) { + stmt, err := parser.ParseStrictDDL(schema) if err != nil { return "", err @@ -143,7 +143,7 @@ func validateSchemaDefinition(name, schema string) (string, error) { // loadSchemaDefinitions loads the embedded schema definitions // into a slice of sidecarTables for processing. 
-func loadSchemaDefinitions() { +func loadSchemaDefinitions(parser *sqlparser.Parser) { sqlFileExtension := ".sql" err := fs.WalkDir(schemaLocation, ".", func(path string, entry fs.DirEntry, err error) error { if err != nil { @@ -172,7 +172,7 @@ func loadSchemaDefinitions() { panic(err) } var normalizedSchema string - if normalizedSchema, err = validateSchemaDefinition(name, string(schema)); err != nil { + if normalizedSchema, err = validateSchemaDefinition(name, string(schema), parser); err != nil { return err } sidecarTables = append(sidecarTables, &sidecarTable{name: name, module: module, path: path, schema: normalizedSchema}) @@ -197,6 +197,7 @@ type schemaInit struct { ctx context.Context exec Exec dbCreated bool // The first upgrade/create query will also create the sidecar database if required. + parser *sqlparser.Parser } // Exec is a callback that has to be passed to Init() to @@ -228,15 +229,18 @@ func getDDLErrorHistory() []*ddlError { // Init creates or upgrades the sidecar database based on // the declarative schema defined for all tables. 
-func Init(ctx context.Context, exec Exec) error { +func Init(ctx context.Context, exec Exec, parser *sqlparser.Parser) error { printCallerDetails() // for debug purposes only, remove in v17 log.Infof("Starting sidecardb.Init()") - once.Do(loadSchemaDefinitions) + once.Do(func() { + loadSchemaDefinitions(parser) + }) si := &schemaInit{ - ctx: ctx, - exec: exec, + ctx: ctx, + exec: exec, + parser: parser, } // There are paths in the tablet initialization where we @@ -371,7 +375,7 @@ func (si *schemaInit) findTableSchemaDiff(tableName, current, desired string) (s TableCharsetCollateStrategy: schemadiff.TableCharsetCollateIgnoreAlways, AlterTableAlgorithmStrategy: schemadiff.AlterTableAlgorithmStrategyCopy, } - diff, err := schemadiff.DiffCreateTablesQueries(current, desired, hints) + diff, err := schemadiff.DiffCreateTablesQueries(current, desired, hints, si.parser) if err != nil { return "", err } @@ -459,8 +463,10 @@ func (t *sidecarTable) String() string { // AddSchemaInitQueries adds sidecar database schema related // queries to a mock db. // This is for unit tests only! 
-func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool) { - once.Do(loadSchemaDefinitions) +func AddSchemaInitQueries(db *fakesqldb.DB, populateTables bool, parser *sqlparser.Parser) { + once.Do(func() { + loadSchemaDefinitions(parser) + }) result := &sqltypes.Result{} for _, q := range sidecar.DBInitQueryPatterns { db.AddQueryPattern(q, result) diff --git a/go/vt/sidecardb/sidecardb_test.go b/go/vt/sidecardb/sidecardb_test.go index 22147c960e9..1565e0cb754 100644 --- a/go/vt/sidecardb/sidecardb_test.go +++ b/go/vt/sidecardb/sidecardb_test.go @@ -25,6 +25,7 @@ import ( "testing" "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/require" @@ -41,12 +42,13 @@ func TestInitErrors(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - AddSchemaInitQueries(db, false) + parser := sqlparser.NewTestParser() + AddSchemaInitQueries(db, false, parser) ddlErrorCount.Set(0) ddlCount.Set(0) - cp := db.ConnParams() + cp := dbconfigs.New(db.ConnParams()) conn, err := cp.Connect(ctx) require.NoError(t, err) @@ -69,7 +71,7 @@ func TestInitErrors(t *testing.T) { } // simulate errors for the table creation DDLs applied for tables specified in schemaErrors - stmt, err := sqlparser.Parse(query) + stmt, err := parser.Parse(query) if err != nil { return nil, err } @@ -85,7 +87,7 @@ func TestInitErrors(t *testing.T) { } require.Equal(t, int64(0), getDDLCount()) - err = Init(ctx, exec) + err = Init(ctx, exec, parser) require.NoError(t, err) require.Equal(t, int64(len(sidecarTables)-len(schemaErrors)), getDDLCount()) require.Equal(t, int64(len(schemaErrors)), getDDLErrorCount()) @@ -124,11 +126,12 @@ func TestMiscSidecarDB(t *testing.T) { db := fakesqldb.New(t) defer db.Close() - AddSchemaInitQueries(db, false) + parser := sqlparser.NewTestParser() + AddSchemaInitQueries(db, false, parser) db.AddQuery("use dbname", &sqltypes.Result{}) db.AddQueryPattern("set @@session.sql_mode=.*", 
&sqltypes.Result{}) - cp := db.ConnParams() + cp := dbconfigs.New(db.ConnParams()) conn, err := cp.Connect(ctx) require.NoError(t, err) exec := func(ctx context.Context, query string, maxRows int, useDB bool) (*sqltypes.Result, error) { @@ -149,29 +152,30 @@ func TestMiscSidecarDB(t *testing.T) { require.NoError(t, err) db.AddQuery(dbeq, result) db.AddQuery(sidecar.GetCreateQuery(), &sqltypes.Result{}) - AddSchemaInitQueries(db, false) + AddSchemaInitQueries(db, false, parser) // tests init on empty db ddlErrorCount.Set(0) ddlCount.Set(0) require.Equal(t, int64(0), getDDLCount()) - err = Init(ctx, exec) + err = Init(ctx, exec, parser) require.NoError(t, err) require.Equal(t, int64(len(sidecarTables)), getDDLCount()) // Include the table DDLs in the expected queries. // This causes them to NOT be created again. - AddSchemaInitQueries(db, true) + AddSchemaInitQueries(db, true, parser) // tests init on already inited db - err = Init(ctx, exec) + err = Init(ctx, exec, parser) require.NoError(t, err) require.Equal(t, int64(len(sidecarTables)), getDDLCount()) // tests misc paths not covered above si := &schemaInit{ - ctx: ctx, - exec: exec, + ctx: ctx, + exec: exec, + parser: parser, } err = si.setCurrentDatabase(sidecar.GetIdentifier()) @@ -196,9 +200,10 @@ func TestValidateSchema(t *testing.T) { {"invalid table name", "t1", "create table if not exists t2(i int)", true}, {"qualifier", "t1", "create table if not exists vt_product.t1(i int)", true}, } + parser := sqlparser.NewTestParser() for _, tc := range testCases { t.Run(tc.testName, func(t *testing.T) { - _, err := validateSchemaDefinition(tc.name, tc.schema) + _, err := validateSchemaDefinition(tc.name, tc.schema, parser) if tc.mustError { require.Error(t, err) } else { @@ -220,13 +225,15 @@ func TestAlterTableAlgorithm(t *testing.T) { {"add column", "t1", "create table if not exists _vt.t1(i int)", "create table if not exists _vt.t1(i int, i1 int)"}, {"modify column", "t1", "create table if not exists _vt.t1(i 
int)", "create table if not exists _vt.t(i float)"}, } - si := &schemaInit{} + si := &schemaInit{ + parser: sqlparser.NewTestParser(), + } copyAlgo := sqlparser.AlgorithmValue("COPY") for _, tc := range testCases { t.Run(tc.testName, func(t *testing.T) { diff, err := si.findTableSchemaDiff(tc.tableName, tc.currentSchema, tc.desiredSchema) require.NoError(t, err) - stmt, err := sqlparser.Parse(diff) + stmt, err := si.parser.Parse(diff) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) diff --git a/go/vt/sqlparser/analyzer.go b/go/vt/sqlparser/analyzer.go index b4015f7937b..ea0773d99cc 100644 --- a/go/vt/sqlparser/analyzer.go +++ b/go/vt/sqlparser/analyzer.go @@ -344,8 +344,8 @@ func IsDMLStatement(stmt Statement) bool { // TableFromStatement returns the qualified table name for the query. // This works only for select statements. -func TableFromStatement(sql string) (TableName, error) { - stmt, err := Parse(sql) +func (p *Parser) TableFromStatement(sql string) (TableName, error) { + stmt, err := p.Parse(sql) if err != nil { return TableName{}, err } diff --git a/go/vt/sqlparser/analyzer_test.go b/go/vt/sqlparser/analyzer_test.go index 9f6a451770e..0a2de52ef19 100644 --- a/go/vt/sqlparser/analyzer_test.go +++ b/go/vt/sqlparser/analyzer_test.go @@ -145,8 +145,9 @@ func TestSplitAndExpression(t *testing.T) { sql: "select * from t where (a = 1 and ((b = 1 and c = 1)))", out: []string{"a = 1", "b = 1", "c = 1"}, }} + parser := NewTestParser() for _, tcase := range testcases { - stmt, err := Parse(tcase.sql) + stmt, err := parser.Parse(tcase.sql) assert.NoError(t, err) var expr Expr if where := stmt.(*Select).Where; where != nil { @@ -259,9 +260,9 @@ func TestTableFromStatement(t *testing.T) { in: "bad query", out: "syntax error at position 4 near 'bad'", }} - + parser := NewTestParser() for _, tc := range testcases { - name, err := TableFromStatement(tc.in) + name, err := parser.TableFromStatement(tc.in) var got string if err != 
nil { got = err.Error() @@ -288,8 +289,9 @@ func TestGetTableName(t *testing.T) { out: "", }} + parser := NewTestParser() for _, tc := range testcases { - tree, err := Parse(tc.in) + tree, err := parser.Parse(tc.in) if err != nil { t.Error(err) continue diff --git a/go/vt/sqlparser/ast.go b/go/vt/sqlparser/ast.go index 1ff48b8be78..b510c81767c 100644 --- a/go/vt/sqlparser/ast.go +++ b/go/vt/sqlparser/ast.go @@ -365,8 +365,8 @@ type ( With *With Ignore Ignore Comments *ParsedComments - Targets TableNames TableExprs TableExprs + Targets TableNames Partitions Partitions Where *Where OrderBy OrderBy diff --git a/go/vt/sqlparser/ast_clone.go b/go/vt/sqlparser/ast_clone.go index b29b4c90047..912cba84e6c 100644 --- a/go/vt/sqlparser/ast_clone.go +++ b/go/vt/sqlparser/ast_clone.go @@ -1175,8 +1175,8 @@ func CloneRefOfDelete(n *Delete) *Delete { out := *n out.With = CloneRefOfWith(n.With) out.Comments = CloneRefOfParsedComments(n.Comments) - out.Targets = CloneTableNames(n.Targets) out.TableExprs = CloneTableExprs(n.TableExprs) + out.Targets = CloneTableNames(n.Targets) out.Partitions = ClonePartitions(n.Partitions) out.Where = CloneRefOfWhere(n.Where) out.OrderBy = CloneOrderBy(n.OrderBy) diff --git a/go/vt/sqlparser/ast_copy_on_rewrite.go b/go/vt/sqlparser/ast_copy_on_rewrite.go index 86dda29ebcf..65fab00c890 100644 --- a/go/vt/sqlparser/ast_copy_on_rewrite.go +++ b/go/vt/sqlparser/ast_copy_on_rewrite.go @@ -1850,18 +1850,18 @@ func (c *cow) copyOnRewriteRefOfDelete(n *Delete, parent SQLNode) (out SQLNode, if c.pre == nil || c.pre(n, parent) { _With, changedWith := c.copyOnRewriteRefOfWith(n.With, n) _Comments, changedComments := c.copyOnRewriteRefOfParsedComments(n.Comments, n) - _Targets, changedTargets := c.copyOnRewriteTableNames(n.Targets, n) _TableExprs, changedTableExprs := c.copyOnRewriteTableExprs(n.TableExprs, n) + _Targets, changedTargets := c.copyOnRewriteTableNames(n.Targets, n) _Partitions, changedPartitions := c.copyOnRewritePartitions(n.Partitions, n) 
_Where, changedWhere := c.copyOnRewriteRefOfWhere(n.Where, n) _OrderBy, changedOrderBy := c.copyOnRewriteOrderBy(n.OrderBy, n) _Limit, changedLimit := c.copyOnRewriteRefOfLimit(n.Limit, n) - if changedWith || changedComments || changedTargets || changedTableExprs || changedPartitions || changedWhere || changedOrderBy || changedLimit { + if changedWith || changedComments || changedTableExprs || changedTargets || changedPartitions || changedWhere || changedOrderBy || changedLimit { res := *n res.With, _ = _With.(*With) res.Comments, _ = _Comments.(*ParsedComments) - res.Targets, _ = _Targets.(TableNames) res.TableExprs, _ = _TableExprs.(TableExprs) + res.Targets, _ = _Targets.(TableNames) res.Partitions, _ = _Partitions.(Partitions) res.Where, _ = _Where.(*Where) res.OrderBy, _ = _OrderBy.(OrderBy) diff --git a/go/vt/sqlparser/ast_copy_on_rewrite_test.go b/go/vt/sqlparser/ast_copy_on_rewrite_test.go index 389b2a4bc29..bb2bd5b886e 100644 --- a/go/vt/sqlparser/ast_copy_on_rewrite_test.go +++ b/go/vt/sqlparser/ast_copy_on_rewrite_test.go @@ -24,8 +24,9 @@ import ( ) func TestCopyOnRewrite(t *testing.T) { + parser := NewTestParser() // rewrite an expression without changing the original - expr, err := ParseExpr("a = b") + expr, err := parser.ParseExpr("a = b") require.NoError(t, err) out := CopyOnRewrite(expr, nil, func(cursor *CopyOnWriteCursor) { col, ok := cursor.Node().(*ColName) @@ -42,9 +43,10 @@ func TestCopyOnRewrite(t *testing.T) { } func TestCopyOnRewriteDeeper(t *testing.T) { + parser := NewTestParser() // rewrite an expression without changing the original. 
the changed happens deep in the syntax tree, // here we are testing that all ancestors up to the root are cloned correctly - expr, err := ParseExpr("a + b * c = 12") + expr, err := parser.ParseExpr("a + b * c = 12") require.NoError(t, err) var path []string out := CopyOnRewrite(expr, nil, func(cursor *CopyOnWriteCursor) { @@ -72,8 +74,9 @@ func TestCopyOnRewriteDeeper(t *testing.T) { } func TestDontCopyWithoutRewrite(t *testing.T) { + parser := NewTestParser() // when no rewriting happens, we want the original back - expr, err := ParseExpr("a = b") + expr, err := parser.ParseExpr("a = b") require.NoError(t, err) out := CopyOnRewrite(expr, nil, func(cursor *CopyOnWriteCursor) {}, nil) @@ -81,9 +84,10 @@ func TestDontCopyWithoutRewrite(t *testing.T) { } func TestStopTreeWalk(t *testing.T) { + parser := NewTestParser() // stop walking down part of the AST original := "a = b + c" - expr, err := ParseExpr(original) + expr, err := parser.ParseExpr(original) require.NoError(t, err) out := CopyOnRewrite(expr, func(node, parent SQLNode) bool { _, ok := node.(*BinaryExpr) @@ -102,9 +106,10 @@ func TestStopTreeWalk(t *testing.T) { } func TestStopTreeWalkButStillVisit(t *testing.T) { + parser := NewTestParser() // here we are asserting that even when we stop at the binary expression, we still visit it in the post visitor original := "1337 = b + c" - expr, err := ParseExpr(original) + expr, err := parser.ParseExpr(original) require.NoError(t, err) out := CopyOnRewrite(expr, func(node, parent SQLNode) bool { _, ok := node.(*BinaryExpr) diff --git a/go/vt/sqlparser/ast_equals.go b/go/vt/sqlparser/ast_equals.go index 9beed3a8242..0ded1081fc3 100644 --- a/go/vt/sqlparser/ast_equals.go +++ b/go/vt/sqlparser/ast_equals.go @@ -2362,8 +2362,8 @@ func (cmp *Comparator) RefOfDelete(a, b *Delete) bool { return cmp.RefOfWith(a.With, b.With) && a.Ignore == b.Ignore && cmp.RefOfParsedComments(a.Comments, b.Comments) && - cmp.TableNames(a.Targets, b.Targets) && cmp.TableExprs(a.TableExprs, 
b.TableExprs) && + cmp.TableNames(a.Targets, b.Targets) && cmp.Partitions(a.Partitions, b.Partitions) && cmp.RefOfWhere(a.Where, b.Where) && cmp.OrderBy(a.OrderBy, b.OrderBy) && diff --git a/go/vt/sqlparser/ast_format.go b/go/vt/sqlparser/ast_format.go index 863de56bfba..a61399ae8ae 100644 --- a/go/vt/sqlparser/ast_format.go +++ b/go/vt/sqlparser/ast_format.go @@ -172,7 +172,7 @@ func (node *Delete) Format(buf *TrackedBuffer) { if node.Ignore { buf.literal("ignore ") } - if node.Targets != nil { + if node.Targets != nil && !node.isSingleAliasExpr() { buf.astPrintf(node, "%v ", node.Targets) } buf.astPrintf(node, "from %v%v%v%v%v", node.TableExprs, node.Partitions, node.Where, node.OrderBy, node.Limit) diff --git a/go/vt/sqlparser/ast_format_fast.go b/go/vt/sqlparser/ast_format_fast.go index 6f6f3594c18..37d3ddfa5b8 100644 --- a/go/vt/sqlparser/ast_format_fast.go +++ b/go/vt/sqlparser/ast_format_fast.go @@ -257,7 +257,7 @@ func (node *Delete) FormatFast(buf *TrackedBuffer) { if node.Ignore { buf.WriteString("ignore ") } - if node.Targets != nil { + if node.Targets != nil && !node.isSingleAliasExpr() { node.Targets.FormatFast(buf) buf.WriteByte(' ') } diff --git a/go/vt/sqlparser/ast_funcs.go b/go/vt/sqlparser/ast_funcs.go index 3e8b54f7e08..1de529c973b 100644 --- a/go/vt/sqlparser/ast_funcs.go +++ b/go/vt/sqlparser/ast_funcs.go @@ -24,13 +24,11 @@ import ( "strconv" "strings" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" ) // Walk calls postVisit on every node. 
@@ -2156,25 +2154,31 @@ func (s SelectExprs) AllAggregation() bool { return true } -// RemoveKeyspaceFromColName removes the Qualifier.Qualifier on all ColNames in the expression tree -func RemoveKeyspaceFromColName(expr Expr) { - RemoveKeyspace(expr) -} - // RemoveKeyspace removes the Qualifier.Qualifier on all ColNames in the AST func RemoveKeyspace(in SQLNode) { // Walk will only return an error if we return an error from the inner func. safe to ignore here _ = Walk(func(node SQLNode) (kontinue bool, err error) { - switch col := node.(type) { - case *ColName: - if col.Qualifier.Qualifier.NotEmpty() { - col.Qualifier.Qualifier = NewIdentifierCS("") - } + if col, ok := node.(*ColName); ok && col.Qualifier.Qualifier.NotEmpty() { + col.Qualifier.Qualifier = NewIdentifierCS("") } + return true, nil }, in) } +// RemoveKeyspaceInTables removes the Qualifier on all TableNames in the AST +func RemoveKeyspaceInTables(in SQLNode) { + // Walk will only return an error if we return an error from the inner func. 
safe to ignore here + Rewrite(in, nil, func(cursor *Cursor) bool { + if tbl, ok := cursor.Node().(TableName); ok && tbl.Qualifier.NotEmpty() { + tbl.Qualifier = NewIdentifierCS("") + cursor.Replace(tbl) + } + + return true + }) +} + func convertStringToInt(integer string) int { val, _ := strconv.Atoi(integer) return val @@ -2536,3 +2540,14 @@ func IsLiteral(expr Expr) bool { func (ct *ColumnType) Invisible() bool { return ct.Options.Invisible != nil && *ct.Options.Invisible } + +func (node *Delete) isSingleAliasExpr() bool { + if len(node.Targets) > 1 { + return false + } + if len(node.TableExprs) != 1 { + return false + } + _, isAliasExpr := node.TableExprs[0].(*AliasedTableExpr) + return isAliasExpr +} diff --git a/go/vt/sqlparser/ast_rewrite.go b/go/vt/sqlparser/ast_rewrite.go index 0121695fe8c..6ec89e9a2ba 100644 --- a/go/vt/sqlparser/ast_rewrite.go +++ b/go/vt/sqlparser/ast_rewrite.go @@ -2455,13 +2455,13 @@ func (a *application) rewriteRefOfDelete(parent SQLNode, node *Delete, replacer }) { return false } - if !a.rewriteTableNames(node, node.Targets, func(newNode, parent SQLNode) { - parent.(*Delete).Targets = newNode.(TableNames) + if !a.rewriteTableExprs(node, node.TableExprs, func(newNode, parent SQLNode) { + parent.(*Delete).TableExprs = newNode.(TableExprs) }) { return false } - if !a.rewriteTableExprs(node, node.TableExprs, func(newNode, parent SQLNode) { - parent.(*Delete).TableExprs = newNode.(TableExprs) + if !a.rewriteTableNames(node, node.Targets, func(newNode, parent SQLNode) { + parent.(*Delete).Targets = newNode.(TableNames) }) { return false } diff --git a/go/vt/sqlparser/ast_rewriting_test.go b/go/vt/sqlparser/ast_rewriting_test.go index 86bab314dd8..3ad9a5298c4 100644 --- a/go/vt/sqlparser/ast_rewriting_test.go +++ b/go/vt/sqlparser/ast_rewriting_test.go @@ -335,11 +335,11 @@ func TestRewrites(in *testing.T) { socket: true, queryTimeout: true, }} - + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { 
require := require.New(t) - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(err) result, err := RewriteAST( @@ -353,7 +353,7 @@ func TestRewrites(in *testing.T) { ) require.NoError(err) - expected, err := Parse(tc.expected) + expected, err := parser.Parse(tc.expected) require.NoError(err, "test expectation does not parse [%s]", tc.expected) s := String(expected) @@ -392,7 +392,8 @@ func (*fakeViews) FindView(name TableName) SelectStatement { if name.Name.String() != "user_details" { return nil } - statement, err := Parse("select user.id, user.name, user_extra.salary from user join user_extra where user.id = user_extra.user_id") + parser := NewTestParser() + statement, err := parser.Parse("select user.id, user.name, user_extra.salary from user join user_extra where user.id = user_extra.user_id") if err != nil { return nil } @@ -434,16 +435,17 @@ func TestRewritesWithSetVarComment(in *testing.T) { setVarComment: "AA(a)", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { require := require.New(t) - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(err) result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, tc.setVarComment, nil, nil, &fakeViews{}) require.NoError(err) - expected, err := Parse(tc.expected) + expected, err := parser.Parse(tc.expected) require.NoError(err, "test expectation does not parse [%s]", tc.expected) assert.Equal(t, String(expected), String(result.AST)) @@ -482,16 +484,17 @@ func TestRewritesSysVar(in *testing.T) { expected: "select :__vttransaction_isolation as `@@session.transaction_isolation` from dual", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { require := require.New(t) - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(err) result, err := RewriteAST(stmt, "ks", SQLSelectLimitUnset, "", tc.sysVar, nil, &fakeViews{}) require.NoError(err) - expected, err := 
Parse(tc.expected) + expected, err := parser.Parse(tc.expected) require.NoError(err, "test expectation does not parse [%s]", tc.expected) assert.Equal(t, String(expected), String(result.AST)) @@ -532,16 +535,17 @@ func TestRewritesWithDefaultKeyspace(in *testing.T) { expected: "SELECT 2 as `(select 2 from dual)` from DUAL", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { require := require.New(t) - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(err) result, err := RewriteAST(stmt, "sys", SQLSelectLimitUnset, "", nil, nil, &fakeViews{}) require.NoError(err) - expected, err := Parse(tc.expected) + expected, err := parser.Parse(tc.expected) require.NoError(err, "test expectation does not parse [%s]", tc.expected) assert.Equal(t, String(expected), String(result.AST)) diff --git a/go/vt/sqlparser/ast_test.go b/go/vt/sqlparser/ast_test.go index 97b93a80379..b1181e83db1 100644 --- a/go/vt/sqlparser/ast_test.go +++ b/go/vt/sqlparser/ast_test.go @@ -30,8 +30,9 @@ import ( ) func TestAppend(t *testing.T) { + parser := NewTestParser() query := "select * from t where a = 1" - tree, err := Parse(query) + tree, err := parser.Parse(query) require.NoError(t, err) var b strings.Builder Append(&b, tree) @@ -49,9 +50,10 @@ func TestAppend(t *testing.T) { } func TestSelect(t *testing.T) { - e1, err := ParseExpr("a = 1") + parser := NewTestParser() + e1, err := parser.ParseExpr("a = 1") require.NoError(t, err) - e2, err := ParseExpr("b = 2") + e2, err := parser.ParseExpr("b = 2") require.NoError(t, err) t.Run("single predicate where", func(t *testing.T) { sel := &Select{} @@ -81,7 +83,8 @@ func TestSelect(t *testing.T) { } func TestUpdate(t *testing.T) { - tree, err := Parse("update t set a = 1") + parser := NewTestParser() + tree, err := parser.Parse("update t set a = 1") require.NoError(t, err) upd, ok := tree.(*Update) @@ -103,11 +106,12 @@ func TestUpdate(t *testing.T) { } func TestRemoveHints(t *testing.T) 
{ + parser := NewTestParser() for _, query := range []string{ "select * from t use index (i)", "select * from t force index (i)", } { - tree, err := Parse(query) + tree, err := parser.Parse(query) if err != nil { t.Fatal(err) } @@ -124,16 +128,17 @@ func TestRemoveHints(t *testing.T) { } func TestAddOrder(t *testing.T) { - src, err := Parse("select foo, bar from baz order by foo") + parser := NewTestParser() + src, err := parser.Parse("select foo, bar from baz order by foo") require.NoError(t, err) order := src.(*Select).OrderBy[0] - dst, err := Parse("select * from t") + dst, err := parser.Parse("select * from t") require.NoError(t, err) dst.(*Select).AddOrder(order) buf := NewTrackedBuffer(nil) dst.Format(buf) require.Equal(t, "select * from t order by foo asc", buf.String()) - dst, err = Parse("select * from t union select * from s") + dst, err = parser.Parse("select * from t union select * from s") require.NoError(t, err) dst.(*Union).AddOrder(order) buf = NewTrackedBuffer(nil) @@ -142,16 +147,17 @@ func TestAddOrder(t *testing.T) { } func TestSetLimit(t *testing.T) { - src, err := Parse("select foo, bar from baz limit 4") + parser := NewTestParser() + src, err := parser.Parse("select foo, bar from baz limit 4") require.NoError(t, err) limit := src.(*Select).Limit - dst, err := Parse("select * from t") + dst, err := parser.Parse("select * from t") require.NoError(t, err) dst.(*Select).SetLimit(limit) buf := NewTrackedBuffer(nil) dst.Format(buf) require.Equal(t, "select * from t limit 4", buf.String()) - dst, err = Parse("select * from t union select * from s") + dst, err = parser.Parse("select * from t union select * from s") require.NoError(t, err) dst.(*Union).SetLimit(limit) buf = NewTrackedBuffer(nil) @@ -213,8 +219,9 @@ func TestDDL(t *testing.T) { }, affected: []string{"a", "b"}, }} + parser := NewTestParser() for _, tcase := range testcases { - got, err := Parse(tcase.query) + got, err := parser.Parse(tcase.query) if err != nil { t.Fatal(err) } @@ -232,7 
+239,8 @@ func TestDDL(t *testing.T) { } func TestSetAutocommitON(t *testing.T) { - stmt, err := Parse("SET autocommit=ON") + parser := NewTestParser() + stmt, err := parser.Parse("SET autocommit=ON") require.NoError(t, err) s, ok := stmt.(*Set) if !ok { @@ -257,7 +265,7 @@ func TestSetAutocommitON(t *testing.T) { t.Errorf("SET statement expression is not Literal: %T", e.Expr) } - stmt, err = Parse("SET @@session.autocommit=ON") + stmt, err = parser.Parse("SET @@session.autocommit=ON") require.NoError(t, err) s, ok = stmt.(*Set) if !ok { @@ -284,7 +292,8 @@ func TestSetAutocommitON(t *testing.T) { } func TestSetAutocommitOFF(t *testing.T) { - stmt, err := Parse("SET autocommit=OFF") + parser := NewTestParser() + stmt, err := parser.Parse("SET autocommit=OFF") require.NoError(t, err) s, ok := stmt.(*Set) if !ok { @@ -309,7 +318,7 @@ func TestSetAutocommitOFF(t *testing.T) { t.Errorf("SET statement expression is not Literal: %T", e.Expr) } - stmt, err = Parse("SET @@session.autocommit=OFF") + stmt, err = parser.Parse("SET @@session.autocommit=OFF") require.NoError(t, err) s, ok = stmt.(*Set) if !ok { @@ -491,9 +500,10 @@ func TestReplaceExpr(t *testing.T) { out: "case a when b then c when d then c else :a end", }} to := NewArgument("a") + parser := NewTestParser() for _, tcase := range tcases { t.Run(tcase.in, func(t *testing.T) { - tree, err := Parse(tcase.in) + tree, err := parser.Parse(tcase.in) require.NoError(t, err) var from *Subquery _ = Walk(func(node SQLNode) (kontinue bool, err error) { @@ -738,13 +748,14 @@ func TestSplitStatementToPieces(t *testing.T) { }, } + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - stmtPieces, err := SplitStatementToPieces(tcase.input) + stmtPieces, err := parser.SplitStatementToPieces(tcase.input) require.NoError(t, err) out := strings.Join(stmtPieces, ";") @@ -766,13 +777,15 @@ func TestDefaultStatus(t *testing.T) { } func 
TestShowTableStatus(t *testing.T) { + parser := NewTestParser() query := "Show Table Status FROM customer" - tree, err := Parse(query) + tree, err := parser.Parse(query) require.NoError(t, err) require.NotNil(t, tree) } func BenchmarkStringTraces(b *testing.B) { + parser := NewTestParser() for _, trace := range []string{"django_queries.txt", "lobsters.sql.gz"} { b.Run(trace, func(b *testing.B) { queries := loadQueries(b, trace) @@ -782,7 +795,7 @@ func BenchmarkStringTraces(b *testing.B) { parsed := make([]Statement, 0, len(queries)) for _, q := range queries { - pp, err := Parse(q) + pp, err := parser.Parse(q) if err != nil { b.Fatal(err) } diff --git a/go/vt/sqlparser/ast_visit.go b/go/vt/sqlparser/ast_visit.go index a88d689f102..bb2ec7c3500 100644 --- a/go/vt/sqlparser/ast_visit.go +++ b/go/vt/sqlparser/ast_visit.go @@ -1377,10 +1377,10 @@ func VisitRefOfDelete(in *Delete, f Visit) error { if err := VisitRefOfParsedComments(in.Comments, f); err != nil { return err } - if err := VisitTableNames(in.Targets, f); err != nil { + if err := VisitTableExprs(in.TableExprs, f); err != nil { return err } - if err := VisitTableExprs(in.TableExprs, f); err != nil { + if err := VisitTableNames(in.Targets, f); err != nil { return err } if err := VisitPartitions(in.Partitions, f); err != nil { diff --git a/go/vt/sqlparser/cached_size.go b/go/vt/sqlparser/cached_size.go index d86b8a21155..ebac6a68e23 100644 --- a/go/vt/sqlparser/cached_size.go +++ b/go/vt/sqlparser/cached_size.go @@ -1106,13 +1106,6 @@ func (cached *Delete) CachedSize(alloc bool) int64 { size += cached.With.CachedSize(true) // field Comments *vitess.io/vitess/go/vt/sqlparser.ParsedComments size += cached.Comments.CachedSize(true) - // field Targets vitess.io/vitess/go/vt/sqlparser.TableNames - { - size += hack.RuntimeAllocSize(int64(cap(cached.Targets)) * int64(32)) - for _, elem := range cached.Targets { - size += elem.CachedSize(false) - } - } // field TableExprs vitess.io/vitess/go/vt/sqlparser.TableExprs { 
size += hack.RuntimeAllocSize(int64(cap(cached.TableExprs)) * int64(16)) @@ -1122,6 +1115,13 @@ func (cached *Delete) CachedSize(alloc bool) int64 { } } } + // field Targets vitess.io/vitess/go/vt/sqlparser.TableNames + { + size += hack.RuntimeAllocSize(int64(cap(cached.Targets)) * int64(32)) + for _, elem := range cached.Targets { + size += elem.CachedSize(false) + } + } // field Partitions vitess.io/vitess/go/vt/sqlparser.Partitions { size += hack.RuntimeAllocSize(int64(cap(cached.Partitions)) * int64(32)) @@ -3064,7 +3064,7 @@ func (cached *ParsedQuery) CachedSize(alloc bool) int64 { } // field Query string size += hack.RuntimeAllocSize(int64(len(cached.Query))) - // field bindLocations []vitess.io/vitess/go/vt/sqlparser.bindLocation + // field bindLocations []vitess.io/vitess/go/vt/sqlparser.BindLocation { size += hack.RuntimeAllocSize(int64(cap(cached.bindLocations)) * int64(16)) } diff --git a/go/vt/sqlparser/comments_test.go b/go/vt/sqlparser/comments_test.go index 734cd28e088..dd22fd7000c 100644 --- a/go/vt/sqlparser/comments_test.go +++ b/go/vt/sqlparser/comments_test.go @@ -322,6 +322,7 @@ func TestExtractCommentDirectives(t *testing.T) { }, }} + parser := NewTestParser() for _, testCase := range testCases { t.Run(testCase.input, func(t *testing.T) { sqls := []string{ @@ -339,7 +340,7 @@ func TestExtractCommentDirectives(t *testing.T) { for _, sql := range sqls { t.Run(sql, func(t *testing.T) { var comments *ParsedComments - stmt, _ := Parse(sql) + stmt, _ := parser.Parse(sql) switch s := stmt.(type) { case *Select: comments = s.Comments @@ -394,19 +395,20 @@ func TestExtractCommentDirectives(t *testing.T) { } func TestSkipQueryPlanCacheDirective(t *testing.T) { - stmt, _ := Parse("insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)") + parser := NewTestParser() + stmt, _ := parser.Parse("insert /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ into user(id) values (1), (2)") assert.False(t, CachePlan(stmt)) - stmt, _ = Parse("insert into user(id) values 
(1), (2)") + stmt, _ = parser.Parse("insert into user(id) values (1), (2)") assert.True(t, CachePlan(stmt)) - stmt, _ = Parse("update /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ users set name=1") + stmt, _ = parser.Parse("update /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ users set name=1") assert.False(t, CachePlan(stmt)) - stmt, _ = Parse("select /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ * from users") + stmt, _ = parser.Parse("select /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ * from users") assert.False(t, CachePlan(stmt)) - stmt, _ = Parse("delete /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ from users") + stmt, _ = parser.Parse("delete /*vt+ SKIP_QUERY_PLAN_CACHE=1 */ from users") assert.False(t, CachePlan(stmt)) } @@ -427,9 +429,10 @@ func TestIgnoreMaxPayloadSizeDirective(t *testing.T) { {"show create table users", false}, } + parser := NewTestParser() for _, test := range testCases { t.Run(test.query, func(t *testing.T) { - stmt, _ := Parse(test.query) + stmt, _ := parser.Parse(test.query) got := IgnoreMaxPayloadSizeDirective(stmt) assert.Equalf(t, test.expected, got, fmt.Sprintf("IgnoreMaxPayloadSizeDirective(stmt) returned %v but expected %v", got, test.expected)) }) @@ -453,9 +456,10 @@ func TestIgnoreMaxMaxMemoryRowsDirective(t *testing.T) { {"show create table users", false}, } + parser := NewTestParser() for _, test := range testCases { t.Run(test.query, func(t *testing.T) { - stmt, _ := Parse(test.query) + stmt, _ := parser.Parse(test.query) got := IgnoreMaxMaxMemoryRowsDirective(stmt) assert.Equalf(t, test.expected, got, fmt.Sprintf("IgnoreMaxPayloadSizeDirective(stmt) returned %v but expected %v", got, test.expected)) }) @@ -479,9 +483,10 @@ func TestConsolidator(t *testing.T) { {"select /*vt+ CONSOLIDATOR=enabled_replicas */ * from users", querypb.ExecuteOptions_CONSOLIDATOR_ENABLED_REPLICAS}, } + parser := NewTestParser() for _, test := range testCases { t.Run(test.query, func(t *testing.T) { - stmt, _ := Parse(test.query) + stmt, _ := parser.Parse(test.query) got := Consolidator(stmt) 
assert.Equalf(t, test.expected, got, fmt.Sprintf("Consolidator(stmt) returned %v but expected %v", got, test.expected)) }) @@ -536,11 +541,12 @@ func TestGetPriorityFromStatement(t *testing.T) { }, } + parser := NewTestParser() for _, testCase := range testCases { theThestCase := testCase t.Run(theThestCase.query, func(t *testing.T) { t.Parallel() - stmt, err := Parse(theThestCase.query) + stmt, err := parser.Parse(theThestCase.query) assert.NoError(t, err) actualPriority, actualError := GetPriorityFromStatement(stmt) if theThestCase.expectedError != nil { diff --git a/go/vt/sqlparser/keywords.go b/go/vt/sqlparser/keywords.go index 968c2da4e7e..5c4ddb4b4f3 100644 --- a/go/vt/sqlparser/keywords.go +++ b/go/vt/sqlparser/keywords.go @@ -818,14 +818,6 @@ func (cit *caseInsensitiveTable) LookupString(name string) (int, bool) { return 0, false } -func (cit *caseInsensitiveTable) Lookup(name []byte) (int, bool) { - hash := fnv1aI(offset64, name) - if candidate, ok := cit.h[hash]; ok { - return candidate.id, candidate.match(name) - } - return 0, false -} - func init() { for _, kw := range keywords { if kw.id == UNUSED { @@ -853,16 +845,6 @@ func KeywordString(id int) string { const offset64 = uint64(14695981039346656037) const prime64 = uint64(1099511628211) -func fnv1aI(h uint64, s []byte) uint64 { - for _, c := range s { - if 'A' <= c && c <= 'Z' { - c += 'a' - 'A' - } - h = (h ^ uint64(c)) * prime64 - } - return h -} - func fnv1aIstr(h uint64, s string) uint64 { for i := 0; i < len(s); i++ { c := s[i] diff --git a/go/vt/sqlparser/keywords_test.go b/go/vt/sqlparser/keywords_test.go index 0209ee20352..d386339a57f 100644 --- a/go/vt/sqlparser/keywords_test.go +++ b/go/vt/sqlparser/keywords_test.go @@ -32,6 +32,7 @@ func TestCompatibility(t *testing.T) { require.NoError(t, err) defer file.Close() + parser := NewTestParser() scanner := bufio.NewScanner(file) skipStep := 4 for scanner.Scan() { @@ -46,7 +47,7 @@ func TestCompatibility(t *testing.T) { word = "`" + word + "`" } 
sql := fmt.Sprintf("create table %s(c1 int)", word) - _, err := ParseStrictDDL(sql) + _, err := parser.ParseStrictDDL(sql) if err != nil { t.Errorf("%s is not compatible with mysql", word) } diff --git a/go/vt/sqlparser/like_filter_test.go b/go/vt/sqlparser/like_filter_test.go index 242e45e2f8d..3249eb152b9 100644 --- a/go/vt/sqlparser/like_filter_test.go +++ b/go/vt/sqlparser/like_filter_test.go @@ -30,7 +30,8 @@ func TestEmptyLike(t *testing.T) { } func TestLikePrefixRegexp(t *testing.T) { - show, e := Parse("show vitess_metadata variables like 'key%'") + parser := NewTestParser() + show, e := parser.Parse("show vitess_metadata variables like 'key%'") if e != nil { t.Error(e) } @@ -42,7 +43,8 @@ func TestLikePrefixRegexp(t *testing.T) { } func TestLikeAnyCharsRegexp(t *testing.T) { - show, e := Parse("show vitess_metadata variables like '%val1%val2%'") + parser := NewTestParser() + show, e := parser.Parse("show vitess_metadata variables like '%val1%val2%'") if e != nil { t.Error(e) } @@ -54,7 +56,8 @@ func TestLikeAnyCharsRegexp(t *testing.T) { } func TestSingleAndMultipleCharsRegexp(t *testing.T) { - show, e := Parse("show vitess_metadata variables like '_val1_val2%'") + parser := NewTestParser() + show, e := parser.Parse("show vitess_metadata variables like '_val1_val2%'") if e != nil { t.Error(e) } @@ -66,7 +69,8 @@ func TestSingleAndMultipleCharsRegexp(t *testing.T) { } func TestSpecialCharactersRegexp(t *testing.T) { - show, e := Parse("show vitess_metadata variables like '?.*?'") + parser := NewTestParser() + show, e := parser.Parse("show vitess_metadata variables like '?.*?'") if e != nil { t.Error(e) } @@ -78,7 +82,8 @@ func TestSpecialCharactersRegexp(t *testing.T) { } func TestQuoteLikeSpecialCharacters(t *testing.T) { - show, e := Parse(`show vitess_metadata variables like 'part1_part2\\%part3_part4\\_part5%'`) + parser := NewTestParser() + show, e := parser.Parse(`show vitess_metadata variables like 'part1_part2\\%part3_part4\\_part5%'`) if e != nil { 
t.Error(e) } diff --git a/go/vt/sqlparser/normalizer_test.go b/go/vt/sqlparser/normalizer_test.go index f0771d437fa..18f2ad44a7f 100644 --- a/go/vt/sqlparser/normalizer_test.go +++ b/go/vt/sqlparser/normalizer_test.go @@ -389,9 +389,10 @@ func TestNormalize(t *testing.T) { "bv3": sqltypes.Int64BindVariable(3), }, }} + parser := NewTestParser() for _, tc := range testcases { t.Run(tc.in, func(t *testing.T) { - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(t, err) known := GetBindvars(stmt) bv := make(map[string]*querypb.BindVariable) @@ -416,9 +417,10 @@ func TestNormalizeInvalidDates(t *testing.T) { in: "select timestamp'foo'", err: vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.WrongValue, "Incorrect DATETIME value: '%s'", "foo"), }} + parser := NewTestParser() for _, tc := range testcases { t.Run(tc.in, func(t *testing.T) { - stmt, err := Parse(tc.in) + stmt, err := parser.Parse(tc.in) require.NoError(t, err) known := GetBindvars(stmt) bv := make(map[string]*querypb.BindVariable) @@ -428,12 +430,13 @@ func TestNormalizeInvalidDates(t *testing.T) { } func TestNormalizeValidSQL(t *testing.T) { + parser := NewTestParser() for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.partialDDL || tcase.ignoreNormalizerTest { return } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) require.NoError(t, err, tcase.input) // Skip the test for the queries that do not run the normalizer if !CanNormalize(tree) { @@ -447,7 +450,7 @@ func TestNormalizeValidSQL(t *testing.T) { if normalizerOutput == "otheradmin" || normalizerOutput == "otherread" { return } - _, err = Parse(normalizerOutput) + _, err = parser.Parse(normalizerOutput) require.NoError(t, err, normalizerOutput) }) } @@ -463,7 +466,8 @@ func TestNormalizeOneCasae(t *testing.T) { if testOne.input == "" { t.Skip("empty test case") } - tree, err := Parse(testOne.input) + parser := NewTestParser() + tree, err := 
parser.Parse(testOne.input) require.NoError(t, err, testOne.input) // Skip the test for the queries that do not run the normalizer if !CanNormalize(tree) { @@ -477,12 +481,13 @@ func TestNormalizeOneCasae(t *testing.T) { if normalizerOutput == "otheradmin" || normalizerOutput == "otherread" { return } - _, err = Parse(normalizerOutput) + _, err = parser.Parse(normalizerOutput) require.NoError(t, err, normalizerOutput) } func TestGetBindVars(t *testing.T) { - stmt, err := Parse("select * from t where :v1 = :v2 and :v2 = :v3 and :v4 in ::v5") + parser := NewTestParser() + stmt, err := parser.Parse("select * from t where :v1 = :v2 and :v2 = :v3 and :v4 in ::v5") if err != nil { t.Fatal(err) } @@ -506,8 +511,9 @@ Prior to skip: BenchmarkNormalize-8 500000 3620 ns/op 1461 B/op 55 allocs/op */ func BenchmarkNormalize(b *testing.B) { + parser := NewTestParser() sql := "select 'abcd', 20, 30.0, eid from a where 1=eid and name='3'" - ast, reservedVars, err := Parse2(sql) + ast, reservedVars, err := parser.Parse2(sql) if err != nil { b.Fatal(err) } @@ -517,6 +523,7 @@ func BenchmarkNormalize(b *testing.B) { } func BenchmarkNormalizeTraces(b *testing.B) { + parser := NewTestParser() for _, trace := range []string{"django_queries.txt", "lobsters.sql.gz"} { b.Run(trace, func(b *testing.B) { queries := loadQueries(b, trace) @@ -527,7 +534,7 @@ func BenchmarkNormalizeTraces(b *testing.B) { parsed := make([]Statement, 0, len(queries)) reservedVars := make([]BindVars, 0, len(queries)) for _, q := range queries { - pp, kb, err := Parse2(q) + pp, kb, err := parser.Parse2(q) if err != nil { b.Fatal(err) } @@ -549,6 +556,7 @@ func BenchmarkNormalizeTraces(b *testing.B) { func BenchmarkNormalizeVTGate(b *testing.B) { const keyspace = "main_keyspace" + parser := NewTestParser() queries := loadQueries(b, "lobsters.sql.gz") if len(queries) > 10000 { @@ -560,7 +568,7 @@ func BenchmarkNormalizeVTGate(b *testing.B) { for i := 0; i < b.N; i++ { for _, sql := range queries { - stmt, 
reservedVars, err := Parse2(sql) + stmt, reservedVars, err := parser.Parse2(sql) if err != nil { b.Fatal(err) } @@ -856,9 +864,10 @@ func benchmarkNormalization(b *testing.B, sqls []string) { b.Helper() b.ReportAllocs() b.ResetTimer() + parser := NewTestParser() for i := 0; i < b.N; i++ { for _, sql := range sqls { - stmt, reserved, err := Parse2(sql) + stmt, reserved, err := parser.Parse2(sql) if err != nil { b.Fatalf("%v: %q", err, sql) } diff --git a/go/vt/sqlparser/parse_next_test.go b/go/vt/sqlparser/parse_next_test.go index 756bf4fb3d0..687bb7fbb51 100644 --- a/go/vt/sqlparser/parse_next_test.go +++ b/go/vt/sqlparser/parse_next_test.go @@ -34,7 +34,8 @@ func TestParseNextValid(t *testing.T) { sql.WriteRune(';') } - tokens := NewStringTokenizer(sql.String()) + parser := NewTestParser() + tokens := parser.NewStringTokenizer(sql.String()) for _, tcase := range validSQL { want := tcase.output if want == "" { @@ -54,7 +55,8 @@ func TestParseNextValid(t *testing.T) { func TestIgnoreSpecialComments(t *testing.T) { input := `SELECT 1;/*! ALTER TABLE foo DISABLE KEYS */;SELECT 2;` - tokenizer := NewStringTokenizer(input) + parser := NewTestParser() + tokenizer := parser.NewStringTokenizer(input) tokenizer.SkipSpecialComments = true one, err := ParseNextStrictDDL(tokenizer) require.NoError(t, err) @@ -67,6 +69,7 @@ func TestIgnoreSpecialComments(t *testing.T) { // TestParseNextErrors tests all the error cases, and ensures a valid // SQL statement can be passed afterwards. func TestParseNextErrors(t *testing.T) { + parser := NewTestParser() for _, tcase := range invalidSQL { if tcase.excludeMulti { // Skip tests which leave unclosed strings, or comments. 
@@ -74,7 +77,7 @@ func TestParseNextErrors(t *testing.T) { } t.Run(tcase.input, func(t *testing.T) { sql := tcase.input + "; select 1 from t" - tokens := NewStringTokenizer(sql) + tokens := parser.NewStringTokenizer(sql) // The first statement should be an error _, err := ParseNextStrictDDL(tokens) @@ -133,9 +136,9 @@ func TestParseNextEdgeCases(t *testing.T) { input: "create table a ignore me this is garbage; select 1 from a", want: []string{"create table a", "select 1 from a"}, }} - + parser := NewTestParser() for _, test := range tests { - tokens := NewStringTokenizer(test.input) + tokens := parser.NewStringTokenizer(test.input) for i, want := range test.want { tree, err := ParseNext(tokens) @@ -165,7 +168,8 @@ func TestParseNextStrictNonStrict(t *testing.T) { want := []string{"create table a", "select 1 from a"} // First go through as expected with non-strict DDL parsing. - tokens := NewStringTokenizer(input) + parser := NewTestParser() + tokens := parser.NewStringTokenizer(input) for i, want := range want { tree, err := ParseNext(tokens) if err != nil { @@ -177,7 +181,7 @@ func TestParseNextStrictNonStrict(t *testing.T) { } // Now try again with strict parsing and observe the expected error. - tokens = NewStringTokenizer(input) + tokens = parser.NewStringTokenizer(input) _, err := ParseNextStrictDDL(tokens) if err == nil || !strings.Contains(err.Error(), "ignore") { t.Fatalf("ParseNext(%q) err = %q, want ignore", input, err) diff --git a/go/vt/sqlparser/parse_table.go b/go/vt/sqlparser/parse_table.go index 8766994ecfd..d522a855054 100644 --- a/go/vt/sqlparser/parse_table.go +++ b/go/vt/sqlparser/parse_table.go @@ -23,8 +23,8 @@ import ( // ParseTable parses the input as a qualified table name. // It handles all valid literal escaping. 
-func ParseTable(input string) (keyspace, table string, err error) { - tokenizer := NewStringTokenizer(input) +func (p *Parser) ParseTable(input string) (keyspace, table string, err error) { + tokenizer := p.NewStringTokenizer(input) // Start, want ID token, value := tokenizer.Scan() diff --git a/go/vt/sqlparser/parse_table_test.go b/go/vt/sqlparser/parse_table_test.go index 09e7ea44177..5f187cbc6d0 100644 --- a/go/vt/sqlparser/parse_table_test.go +++ b/go/vt/sqlparser/parse_table_test.go @@ -56,8 +56,9 @@ func TestParseTable(t *testing.T) { input: "k.t.", err: true, }} + parser := NewTestParser() for _, tcase := range testcases { - keyspace, table, err := ParseTable(tcase.input) + keyspace, table, err := parser.ParseTable(tcase.input) assert.Equal(t, tcase.keyspace, keyspace) assert.Equal(t, tcase.table, table) if tcase.err { diff --git a/go/vt/sqlparser/parse_test.go b/go/vt/sqlparser/parse_test.go index d2396cabe17..bb51bbb2479 100644 --- a/go/vt/sqlparser/parse_test.go +++ b/go/vt/sqlparser/parse_test.go @@ -1360,7 +1360,7 @@ var ( input: "delete /* limit */ from a limit b", }, { input: "delete /* alias where */ t.* from a as t where t.id = 2", - output: "delete /* alias where */ t from a as t where t.id = 2", + output: "delete /* alias where */ from a as t where t.id = 2", }, { input: "delete t.* from t, t1", output: "delete t from t, t1", @@ -3697,12 +3697,13 @@ var ( ) func TestValid(t *testing.T) { + parser := NewTestParser() for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) require.NoError(t, err, tcase.input) out := String(tree) assert.Equal(t, tcase.output, out) @@ -3734,6 +3735,7 @@ func TestParallelValid(t *testing.T) { wg := sync.WaitGroup{} wg.Add(parallelism) + parser := NewTestParser() for i := 0; i < parallelism; i++ { go func() { defer wg.Done() @@ -3742,7 +3744,7 @@ func TestParallelValid(t 
*testing.T) { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("Parse(%q) err: %v, want nil", tcase.input, err) continue @@ -3941,9 +3943,10 @@ func TestInvalid(t *testing.T) { }, } + parser := NewTestParser() for _, tcase := range invalidSQL { t.Run(tcase.input, func(t *testing.T) { - _, err := Parse(tcase.input) + _, err := parser.Parse(tcase.input) require.Error(t, err) require.Contains(t, err.Error(), tcase.err) }) @@ -4081,12 +4084,13 @@ func TestIntroducers(t *testing.T) { input: "select _utf8mb3 'x'", output: "select _utf8mb3 'x' from dual", }} + parser := NewTestParser() for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) assert.NoError(t, err) out := String(tree) assert.Equal(t, tcase.output, out) @@ -4175,11 +4179,12 @@ func TestCaseSensitivity(t *testing.T) { }, { input: "select /* use */ 1 from t1 use index (A) where b = 1", }} + parser := NewTestParser() for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -4274,11 +4279,12 @@ func TestKeywords(t *testing.T) { output: "select current_user(), current_user() from dual", }} + parser := NewTestParser() for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -4351,11 +4357,12 @@ func TestConvert(t *testing.T) { input: "select cast(json_keys(c) as char(64) array) from t", }} + parser := NewTestParser() for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } - tree, err 
:= Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -4399,7 +4406,7 @@ func TestConvert(t *testing.T) { }} for _, tcase := range invalidSQL { - _, err := Parse(tcase.input) + _, err := parser.Parse(tcase.input) if err == nil || err.Error() != tcase.output { t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) } @@ -4437,12 +4444,13 @@ func TestSelectInto(t *testing.T) { output: "alter vschema create vindex my_vdx using `hash`", }} + parser := NewTestParser() for _, tcase := range validSQL { t.Run(tcase.input, func(t *testing.T) { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) require.NoError(t, err) out := String(tree) assert.Equal(t, tcase.output, out) @@ -4461,7 +4469,7 @@ func TestSelectInto(t *testing.T) { }} for _, tcase := range invalidSQL { - _, err := Parse(tcase.input) + _, err := parser.Parse(tcase.input) if err == nil || err.Error() != tcase.output { t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) } @@ -4498,8 +4506,9 @@ func TestPositionedErr(t *testing.T) { output: PositionedErr{"syntax error", 34, ""}, }} + parser := NewTestParser() for _, tcase := range invalidSQL { - tkn := NewStringTokenizer(tcase.input) + tkn := parser.NewStringTokenizer(tcase.input) _, err := ParseNext(tkn) if posErr, ok := err.(PositionedErr); !ok { @@ -4548,11 +4557,12 @@ func TestSubStr(t *testing.T) { output: `select substr(substr('foo', 1), 2) from t`, }} + parser := NewTestParser() for _, tcase := range validSQL { if tcase.output == "" { tcase.output = tcase.input } - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -4572,8 +4582,9 @@ func TestLoadData(t *testing.T) { "load data infile 'x.txt' into table 'c'", "load data from s3 'x.txt' into table x"} + parser := NewTestParser() for _, 
tcase := range validSQL { - _, err := Parse(tcase) + _, err := parser.Parse(tcase) require.NoError(t, err) } } @@ -5750,10 +5761,11 @@ partition by range (YEAR(purchased)) subpartition by hash (TO_DAYS(purchased)) output: "create table t (\n\tid int,\n\tinfo JSON,\n\tkey zips ((cast(info -> '$.field' as unsigned array)))\n)", }, } + parser := NewTestParser() for _, test := range createTableQueries { sql := strings.TrimSpace(test.input) t.Run(sql, func(t *testing.T) { - tree, err := ParseStrictDDL(sql) + tree, err := parser.ParseStrictDDL(sql) require.NoError(t, err) got := String(tree) expected := test.output @@ -5776,7 +5788,8 @@ func TestOne(t *testing.T) { return } sql := strings.TrimSpace(testOne.input) - tree, err := Parse(sql) + parser := NewTestParser() + tree, err := parser.Parse(sql) require.NoError(t, err) got := String(tree) expected := testOne.output @@ -5805,8 +5818,9 @@ func TestCreateTableLike(t *testing.T) { "create table ks.a like unsharded_ks.b", }, } + parser := NewTestParser() for _, tcase := range testCases { - tree, err := ParseStrictDDL(tcase.input) + tree, err := parser.ParseStrictDDL(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -5835,8 +5849,9 @@ func TestCreateTableEscaped(t *testing.T) { "\tprimary key (`delete`)\n" + ")", }} + parser := NewTestParser() for _, tcase := range testCases { - tree, err := ParseStrictDDL(tcase.input) + tree, err := parser.ParseStrictDDL(tcase.input) if err != nil { t.Errorf("input: %s, err: %v", tcase.input, err) continue @@ -5981,9 +5996,10 @@ var ( ) func TestErrors(t *testing.T) { + parser := NewTestParser() for _, tcase := range invalidSQL { t.Run(tcase.input, func(t *testing.T) { - _, err := ParseStrictDDL(tcase.input) + _, err := parser.ParseStrictDDL(tcase.input) require.Error(t, err, tcase.output) require.Equal(t, tcase.output, err.Error()) }) @@ -6016,8 +6032,9 @@ func TestSkipToEnd(t *testing.T) { input: "create table a bb 'a;'; select * from t", output: 
"extra characters encountered after end of DDL: 'select'", }} + parser := NewTestParser() for _, tcase := range testcases { - _, err := Parse(tcase.input) + _, err := parser.Parse(tcase.input) if err == nil || err.Error() != tcase.output { t.Errorf("%s: %v, want %s", tcase.input, err, tcase.output) } @@ -6049,8 +6066,9 @@ func loadQueries(t testing.TB, filename string) (queries []string) { } func TestParseDjangoQueries(t *testing.T) { + parser := NewTestParser() for _, query := range loadQueries(t, "django_queries.txt") { - _, err := Parse(query) + _, err := parser.Parse(query) if err != nil { t.Errorf("failed to parse %q: %v", query, err) } @@ -6058,8 +6076,9 @@ func TestParseDjangoQueries(t *testing.T) { } func TestParseLobstersQueries(t *testing.T) { + parser := NewTestParser() for _, query := range loadQueries(t, "lobsters.sql.gz") { - _, err := Parse(query) + _, err := parser.Parse(query) if err != nil { t.Errorf("failed to parse %q: %v", query, err) } @@ -6074,14 +6093,14 @@ func TestParseVersionedComments(t *testing.T) { }{ { input: `CREATE TABLE table1 (id int) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 /*!50900 PARTITION BY RANGE (id) (PARTITION x VALUES LESS THAN (5) ENGINE = InnoDB, PARTITION t VALUES LESS THAN (20) ENGINE = InnoDB) */`, - mysqlVersion: "50401", + mysqlVersion: "5.4.1", output: `create table table1 ( id int ) ENGINE InnoDB, CHARSET utf8mb4`, }, { input: `CREATE TABLE table1 (id int) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 /*!50900 PARTITION BY RANGE (id) (PARTITION x VALUES LESS THAN (5) ENGINE = InnoDB, PARTITION t VALUES LESS THAN (20) ENGINE = InnoDB) */`, - mysqlVersion: "80001", + mysqlVersion: "8.0.1", output: `create table table1 ( id int ) ENGINE InnoDB, @@ -6094,10 +6113,9 @@ partition by range (id) for _, testcase := range testcases { t.Run(testcase.input+":"+testcase.mysqlVersion, func(t *testing.T) { - oldMySQLVersion := mySQLParserVersion - defer func() { mySQLParserVersion = oldMySQLVersion }() - mySQLParserVersion = 
testcase.mysqlVersion - tree, err := Parse(testcase.input) + parser, err := New(Options{MySQLServerVersion: testcase.mysqlVersion}) + require.NoError(t, err) + tree, err := parser.Parse(testcase.input) require.NoError(t, err, testcase.input) out := String(tree) require.Equal(t, testcase.output, out) @@ -6106,6 +6124,7 @@ partition by range (id) } func BenchmarkParseTraces(b *testing.B) { + parser := NewTestParser() for _, trace := range []string{"django_queries.txt", "lobsters.sql.gz"} { b.Run(trace, func(b *testing.B) { queries := loadQueries(b, trace) @@ -6117,7 +6136,7 @@ func BenchmarkParseTraces(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - _, err := Parse(query) + _, err := parser.Parse(query) if err != nil { b.Fatal(err) } @@ -6134,6 +6153,7 @@ func BenchmarkParseStress(b *testing.B) { sql2 = "select aaaa, bbb, ccc, ddd, eeee, ffff, gggg, hhhh, iiii from tttt, ttt1, ttt3 where aaaa = bbbb and bbbb = cccc and dddd+1 = eeee group by fff, gggg having hhhh = iiii and iiii = jjjj order by kkkk, llll limit 3, 4" ) + parser := NewTestParser() for i, sql := range []string{sql1, sql2} { b.Run(fmt.Sprintf("sql%d", i), func(b *testing.B) { var buf strings.Builder @@ -6143,7 +6163,7 @@ func BenchmarkParseStress(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := Parse(querySQL) + _, err := parser.Parse(querySQL) if err != nil { b.Fatal(err) } @@ -6182,8 +6202,9 @@ func BenchmarkParse3(b *testing.B) { b.ResetTimer() b.ReportAllocs() + parser := NewTestParser() for i := 0; i < b.N; i++ { - if _, err := Parse(benchQuery); err != nil { + if _, err := parser.Parse(benchQuery); err != nil { b.Fatal(err) } } @@ -6234,6 +6255,7 @@ func escapeNewLines(in string) string { } func testFile(t *testing.T, filename, tempDir string) { + parser := NewTestParser() t.Run(filename, func(t *testing.T) { fail := false expected := strings.Builder{} @@ -6243,7 +6265,7 @@ func testFile(t *testing.T, filename, tempDir string) { tcase.output = 
tcase.input } expected.WriteString(fmt.Sprintf("%sINPUT\n%s\nEND\n", tcase.comments, escapeNewLines(tcase.input))) - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if tcase.errStr != "" { errPresent := "" if err != nil { @@ -6346,7 +6368,7 @@ func parsePartial(r *bufio.Reader, readType []string, lineno int, fileName strin if returnTypeNumber != -1 { break } - panic(fmt.Errorf("error reading file %s: line %d: %s - Expected keyword", fileName, lineno, err.Error())) + panic(fmt.Errorf("error reading file %s: line %d: Expected keyword", fileName, lineno)) } input := "" for { diff --git a/go/vt/sqlparser/parsed_query.go b/go/vt/sqlparser/parsed_query.go index b6b03a1901a..a612e555ee8 100644 --- a/go/vt/sqlparser/parsed_query.go +++ b/go/vt/sqlparser/parsed_query.go @@ -21,12 +21,7 @@ import ( "fmt" "strings" - "vitess.io/vitess/go/bytes2" - vjson "vitess.io/vitess/go/mysql/json" "vitess.io/vitess/go/sqltypes" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -34,11 +29,12 @@ import ( // bind locations are precomputed for fast substitutions. type ParsedQuery struct { Query string - bindLocations []bindLocation + bindLocations []BindLocation + truncateUILen int } -type bindLocation struct { - offset, length int +type BindLocation struct { + Offset, Length int } // NewParsedQuery returns a ParsedQuery of the ast. 
@@ -67,8 +63,8 @@ func (pq *ParsedQuery) GenerateQuery(bindVariables map[string]*querypb.BindVaria func (pq *ParsedQuery) Append(buf *strings.Builder, bindVariables map[string]*querypb.BindVariable, extras map[string]Encodable) error { current := 0 for _, loc := range pq.bindLocations { - buf.WriteString(pq.Query[current:loc.offset]) - name := pq.Query[loc.offset : loc.offset+loc.length] + buf.WriteString(pq.Query[current:loc.Offset]) + name := pq.Query[loc.Offset : loc.Offset+loc.Length] if encodable, ok := extras[name[1:]]; ok { encodable.EncodeSQL(buf) } else { @@ -78,86 +74,19 @@ func (pq *ParsedQuery) Append(buf *strings.Builder, bindVariables map[string]*qu } EncodeValue(buf, supplied) } - current = loc.offset + loc.length + current = loc.Offset + loc.Length } buf.WriteString(pq.Query[current:]) return nil } -// AppendFromRow behaves like Append but takes a querypb.Row directly, assuming that -// the fields in the row are in the same order as the placeholders in this query. The fields might include generated -// columns which are dropped, by checking against skipFields, before binding the variables -// note: there can be more fields than bind locations since extra columns might be requested from the source if not all -// primary keys columns are present in the target table, for example. 
Also some values in the row may not correspond for -// values from the database on the source: sum/count for aggregation queries, for example -func (pq *ParsedQuery) AppendFromRow(buf *bytes2.Buffer, fields []*querypb.Field, row *querypb.Row, skipFields map[string]bool) error { - if len(fields) < len(pq.bindLocations) { - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of fields: got %d fields for %d bind locations ", - len(fields), len(pq.bindLocations)) - } - - type colInfo struct { - typ querypb.Type - length int64 - offset int64 - } - rowInfo := make([]*colInfo, 0) - - offset := int64(0) - for i, field := range fields { // collect info required for fields to be bound - length := row.Lengths[i] - if !skipFields[strings.ToLower(field.Name)] { - rowInfo = append(rowInfo, &colInfo{ - typ: field.Type, - length: length, - offset: offset, - }) - } - if length > 0 { - offset += row.Lengths[i] - } - } - - // bind field values to locations - var offsetQuery int - for i, loc := range pq.bindLocations { - col := rowInfo[i] - buf.WriteString(pq.Query[offsetQuery:loc.offset]) - typ := col.typ - - switch typ { - case querypb.Type_TUPLE: - return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected Type_TUPLE for value %d", i) - case querypb.Type_JSON: - if col.length < 0 { // An SQL NULL and not an actual JSON value - buf.WriteString(sqltypes.NullStr) - } else { // A JSON value (which may be a JSON null literal value) - buf2 := row.Values[col.offset : col.offset+col.length] - vv, err := vjson.MarshalSQLValue(buf2) - if err != nil { - return err - } - buf.WriteString(vv.RawStr()) - } - default: - if col.length < 0 { - // -1 means a null variable; serialize it directly - buf.WriteString(sqltypes.NullStr) - } else { - vv := sqltypes.MakeTrusted(typ, row.Values[col.offset:col.offset+col.length]) - vv.EncodeSQLBytes2(buf) - } - } - offsetQuery = loc.offset + loc.length - } - buf.WriteString(pq.Query[offsetQuery:]) - return nil +func (pq *ParsedQuery) BindLocations() 
[]BindLocation { + return pq.bindLocations } // MarshalJSON is a custom JSON marshaler for ParsedQuery. -// Note that any queries longer that 512 bytes will be truncated. func (pq *ParsedQuery) MarshalJSON() ([]byte, error) { - return json.Marshal(TruncateForUI(pq.Query)) + return json.Marshal(pq.Query) } // EncodeValue encodes one bind variable value into the query. diff --git a/go/vt/sqlparser/parsed_query_test.go b/go/vt/sqlparser/parsed_query_test.go index 8c89a51984d..ef59676883f 100644 --- a/go/vt/sqlparser/parsed_query_test.go +++ b/go/vt/sqlparser/parsed_query_test.go @@ -27,7 +27,8 @@ import ( ) func TestNewParsedQuery(t *testing.T) { - stmt, err := Parse("select * from a where id =:id") + parser := NewTestParser() + stmt, err := parser.Parse("select * from a where id =:id") if err != nil { t.Error(err) return @@ -35,7 +36,7 @@ func TestNewParsedQuery(t *testing.T) { pq := NewParsedQuery(stmt) want := &ParsedQuery{ Query: "select * from a where id = :id", - bindLocations: []bindLocation{{offset: 27, length: 3}}, + bindLocations: []BindLocation{{Offset: 27, Length: 3}}, } if !reflect.DeepEqual(pq, want) { t.Errorf("GenerateParsedQuery: %+v, want %+v", pq, want) @@ -135,8 +136,9 @@ func TestGenerateQuery(t *testing.T) { }, } + parser := NewTestParser() for _, tcase := range tcases { - tree, err := Parse(tcase.query) + tree, err := parser.Parse(tcase.query) if err != nil { t.Errorf("parse failed for %s: %v", tcase.desc, err) continue diff --git a/go/vt/sqlparser/parser.go b/go/vt/sqlparser/parser.go index ae630ce3dea..4021d4d61be 100644 --- a/go/vt/sqlparser/parser.go +++ b/go/vt/sqlparser/parser.go @@ -23,16 +23,13 @@ import ( "strings" "sync" - "vitess.io/vitess/go/internal/flag" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vterrors" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -var versionFlagSync sync.Once - // parserPool is a pool for parser objects. 
var parserPool = sync.Pool{ New: func() any { @@ -43,9 +40,6 @@ var parserPool = sync.Pool{ // zeroParser is a zero-initialized parser to help reinitialize the parser for pooling. var zeroParser yyParserImpl -// mySQLParserVersion is the version of MySQL that the parser would emulate -var mySQLParserVersion string - // yyParsePooled is a wrapper around yyParse that pools the parser objects. There isn't a // particularly good reason to use yyParse directly, since it immediately discards its parser. // @@ -80,12 +74,12 @@ func yyParsePooled(yylex yyLexer) int { // bind variables that were found in the original SQL query. If a DDL statement // is partially parsed but still contains a syntax error, the // error is ignored and the DDL is returned anyway. -func Parse2(sql string) (Statement, BindVars, error) { - tokenizer := NewStringTokenizer(sql) +func (p *Parser) Parse2(sql string) (Statement, BindVars, error) { + tokenizer := p.NewStringTokenizer(sql) if yyParsePooled(tokenizer) != 0 { if tokenizer.partialDDL != nil { if typ, val := tokenizer.Scan(); typ != 0 { - return nil, nil, fmt.Errorf("extra characters encountered after end of DDL: '%s'", string(val)) + return nil, nil, fmt.Errorf("extra characters encountered after end of DDL: '%s'", val) } log.Warningf("ignoring error parsing DDL '%s': %v", sql, tokenizer.LastError) switch x := tokenizer.partialDDL.(type) { @@ -105,28 +99,6 @@ func Parse2(sql string) (Statement, BindVars, error) { return tokenizer.ParseTree, tokenizer.BindVars, nil } -func checkParserVersionFlag() { - if flag.Parsed() { - versionFlagSync.Do(func() { - convVersion, err := convertMySQLVersionToCommentVersion(servenv.MySQLServerVersion()) - if err != nil { - log.Fatalf("unable to parse mysql version: %v", err) - } - mySQLParserVersion = convVersion - }) - } -} - -// SetParserVersion sets the mysql parser version -func SetParserVersion(version string) { - mySQLParserVersion = version -} - -// GetParserVersion returns the version of the mysql 
parser -func GetParserVersion() string { - return mySQLParserVersion -} - // convertMySQLVersionToCommentVersion converts the MySQL version into comment version format. func convertMySQLVersionToCommentVersion(version string) (string, error) { var res = make([]int, 3) @@ -166,8 +138,8 @@ func convertMySQLVersionToCommentVersion(version string) (string, error) { } // ParseExpr parses an expression and transforms it to an AST -func ParseExpr(sql string) (Expr, error) { - stmt, err := Parse("select " + sql) +func (p *Parser) ParseExpr(sql string) (Expr, error) { + stmt, err := p.Parse("select " + sql) if err != nil { return nil, err } @@ -176,15 +148,15 @@ func ParseExpr(sql string) (Expr, error) { } // Parse behaves like Parse2 but does not return a set of bind variables -func Parse(sql string) (Statement, error) { - stmt, _, err := Parse2(sql) +func (p *Parser) Parse(sql string) (Statement, error) { + stmt, _, err := p.Parse2(sql) return stmt, err } // ParseStrictDDL is the same as Parse except it errors on // partially parsed DDL statements. -func ParseStrictDDL(sql string) (Statement, error) { - tokenizer := NewStringTokenizer(sql) +func (p *Parser) ParseStrictDDL(sql string) (Statement, error) { + tokenizer := p.NewStringTokenizer(sql) if yyParsePooled(tokenizer) != 0 { return nil, tokenizer.LastError } @@ -194,17 +166,11 @@ func ParseStrictDDL(sql string) (Statement, error) { return tokenizer.ParseTree, nil } -// ParseTokenizer is a raw interface to parse from the given tokenizer. -// This does not used pooled parsers, and should not be used in general. -func ParseTokenizer(tokenizer *Tokenizer) int { - return yyParse(tokenizer) -} - // ParseNext parses a single SQL statement from the tokenizer // returning a Statement which is the AST representation of the query. // The tokenizer will always read up to the end of the statement, allowing for // the next call to ParseNext to parse any subsequent SQL statements. 
When -// there are no more statements to parse, a error of io.EOF is returned. +// there are no more statements to parse, an error of io.EOF is returned. func ParseNext(tokenizer *Tokenizer) (Statement, error) { return parseNext(tokenizer, false) } @@ -243,10 +209,10 @@ func parseNext(tokenizer *Tokenizer, strict bool) (Statement, error) { // ErrEmpty is a sentinel error returned when parsing empty statements. var ErrEmpty = vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.EmptyQuery, "Query was empty") -// SplitStatement returns the first sql statement up to either a ; or EOF +// SplitStatement returns the first sql statement up to either a ';' or EOF // and the remainder from the given buffer -func SplitStatement(blob string) (string, string, error) { - tokenizer := NewStringTokenizer(blob) +func (p *Parser) SplitStatement(blob string) (string, string, error) { + tokenizer := p.NewStringTokenizer(blob) tkn := 0 for { tkn, _ = tokenizer.Scan() @@ -265,7 +231,7 @@ func SplitStatement(blob string) (string, string, error) { // SplitStatementToPieces split raw sql statement that may have multi sql pieces to sql pieces // returns the sql pieces blob contains; or error if sql cannot be parsed -func SplitStatementToPieces(blob string) (pieces []string, err error) { +func (p *Parser) SplitStatementToPieces(blob string) (pieces []string, err error) { // fast path: the vast majority of SQL statements do not have semicolons in them if blob == "" { return nil, nil @@ -273,12 +239,15 @@ func SplitStatementToPieces(blob string) (pieces []string, err error) { switch strings.IndexByte(blob, ';') { case -1: // if there is no semicolon, return blob as a whole return []string{blob}, nil - case len(blob) - 1: // if there's a single semicolon and it's the last character, return blob without it + case len(blob) - 1: // if there's a single semicolon, and it's the last character, return blob without it return []string{blob[:len(blob)-1]}, nil } pieces = make([]string, 0, 16) - 
tokenizer := NewStringTokenizer(blob) + // It's safe here to not case about version specific tokenization + // because we are only interested in semicolons and splitting + // statements. + tokenizer := p.NewStringTokenizer(blob) tkn := 0 var stmt string @@ -313,6 +282,49 @@ loop: return } -func IsMySQL80AndAbove() bool { - return mySQLParserVersion >= "80000" +func (p *Parser) IsMySQL80AndAbove() bool { + return p.version >= "80000" +} + +func (p *Parser) SetTruncateErrLen(l int) { + p.truncateErrLen = l +} + +type Options struct { + MySQLServerVersion string + TruncateUILen int + TruncateErrLen int +} + +type Parser struct { + version string + truncateUILen int + truncateErrLen int +} + +func New(opts Options) (*Parser, error) { + if opts.MySQLServerVersion == "" { + opts.MySQLServerVersion = config.DefaultMySQLVersion + } + convVersion, err := convertMySQLVersionToCommentVersion(opts.MySQLServerVersion) + if err != nil { + return nil, err + } + return &Parser{ + version: convVersion, + truncateUILen: opts.TruncateUILen, + truncateErrLen: opts.TruncateErrLen, + }, nil +} + +func NewTestParser() *Parser { + convVersion, err := convertMySQLVersionToCommentVersion(config.DefaultMySQLVersion) + if err != nil { + panic(err) + } + return &Parser{ + version: convVersion, + truncateUILen: 512, + truncateErrLen: 0, + } } diff --git a/go/vt/sqlparser/parser_test.go b/go/vt/sqlparser/parser_test.go index 537cc598da7..5cb15317f29 100644 --- a/go/vt/sqlparser/parser_test.go +++ b/go/vt/sqlparser/parser_test.go @@ -51,9 +51,10 @@ func TestEmptyErrorAndComments(t *testing.T) { output: "select 1 from dual", }, } + parser := NewTestParser() for _, testcase := range testcases { t.Run(testcase.input, func(t *testing.T) { - res, err := Parse(testcase.input) + res, err := parser.Parse(testcase.input) if testcase.err != nil { require.Equal(t, testcase.err, err) } else { @@ -63,7 +64,7 @@ func TestEmptyErrorAndComments(t *testing.T) { }) t.Run(testcase.input+"-Strict DDL", func(t 
*testing.T) { - res, err := ParseStrictDDL(testcase.input) + res, err := parser.ParseStrictDDL(testcase.input) if testcase.err != nil { require.Equal(t, testcase.err, err) } else { diff --git a/go/vt/sqlparser/precedence_test.go b/go/vt/sqlparser/precedence_test.go index a6cbffee351..774ada31dbd 100644 --- a/go/vt/sqlparser/precedence_test.go +++ b/go/vt/sqlparser/precedence_test.go @@ -53,8 +53,9 @@ func TestAndOrPrecedence(t *testing.T) { input: "select * from a where a=b or c=d and e=f", output: "(a = b or (c = d and e = f))", }} + parser := NewTestParser() for _, tcase := range validSQL { - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Error(err) continue @@ -77,8 +78,9 @@ func TestPlusStarPrecedence(t *testing.T) { input: "select 1*2+3 from a", output: "((1 * 2) + 3)", }} + parser := NewTestParser() for _, tcase := range validSQL { - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Error(err) continue @@ -104,8 +106,9 @@ func TestIsPrecedence(t *testing.T) { input: "select * from a where (a=1 and b=2) is true", output: "((a = 1 and b = 2) is true)", }} + parser := NewTestParser() for _, tcase := range validSQL { - tree, err := Parse(tcase.input) + tree, err := parser.Parse(tcase.input) if err != nil { t.Error(err) continue @@ -158,9 +161,10 @@ func TestParens(t *testing.T) { {in: "0 <=> (1 and 0)", expected: "0 <=> (1 and 0)"}, } + parser := NewTestParser() for _, tc := range tests { t.Run(tc.in, func(t *testing.T) { - stmt, err := Parse("select " + tc.in) + stmt, err := parser.Parse("select " + tc.in) require.NoError(t, err) out := String(stmt) require.Equal(t, "select "+tc.expected+" from dual", out) @@ -177,6 +181,7 @@ func TestRandom(t *testing.T) { g := NewGenerator(r, 5) endBy := time.Now().Add(1 * time.Second) + parser := NewTestParser() for { if time.Now().After(endBy) { break @@ -186,7 +191,7 @@ func TestRandom(t *testing.T) { inputQ := "select " + 
String(randomExpr) + " from t" // When it's parsed and unparsed - parsedInput, err := Parse(inputQ) + parsedInput, err := parser.Parse(inputQ) require.NoError(t, err, inputQ) // Then the unparsing should be the same as the input query diff --git a/go/vt/sqlparser/predicate_rewriting_test.go b/go/vt/sqlparser/predicate_rewriting_test.go index a4bbb5f7b5c..ceb4b276017 100644 --- a/go/vt/sqlparser/predicate_rewriting_test.go +++ b/go/vt/sqlparser/predicate_rewriting_test.go @@ -86,9 +86,10 @@ func TestSimplifyExpression(in *testing.T) { expected: "A and (B or C)", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { - expr, err := ParseExpr(tc.in) + expr, err := parser.ParseExpr(tc.in) require.NoError(t, err) expr, changed := simplifyExpression(expr) @@ -157,9 +158,10 @@ func TestRewritePredicate(in *testing.T) { expected: "not n0 xor not (n2 and n3) xor (not n2 and (n1 xor n1) xor (n0 xor n0 xor n2))", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { - expr, err := ParseExpr(tc.in) + expr, err := parser.ParseExpr(tc.in) require.NoError(t, err) output := RewritePredicate(expr) @@ -180,9 +182,10 @@ func TestExtractINFromOR(in *testing.T) { expected: "(a) in ((1), (2), (3), (4), (5), (6))", }} + parser := NewTestParser() for _, tc := range tests { in.Run(tc.in, func(t *testing.T) { - expr, err := ParseExpr(tc.in) + expr, err := parser.ParseExpr(tc.in) require.NoError(t, err) output := ExtractINFromOR(expr.(*OrExpr)) diff --git a/go/vt/sqlparser/redact_query.go b/go/vt/sqlparser/redact_query.go index 194ad1ca64d..e6b8c009c68 100644 --- a/go/vt/sqlparser/redact_query.go +++ b/go/vt/sqlparser/redact_query.go @@ -19,11 +19,11 @@ package sqlparser import querypb "vitess.io/vitess/go/vt/proto/query" // RedactSQLQuery returns a sql string with the params stripped out for display -func RedactSQLQuery(sql string) (string, error) { +func (p *Parser) RedactSQLQuery(sql string) (string, error) { bv 
:= map[string]*querypb.BindVariable{} sqlStripped, comments := SplitMarginComments(sql) - stmt, reservedVars, err := Parse2(sqlStripped) + stmt, reservedVars, err := p.Parse2(sqlStripped) if err != nil { return "", err } diff --git a/go/vt/sqlparser/redact_query_test.go b/go/vt/sqlparser/redact_query_test.go index 1cfd6d83af3..042f0f5b5f2 100644 --- a/go/vt/sqlparser/redact_query_test.go +++ b/go/vt/sqlparser/redact_query_test.go @@ -23,8 +23,9 @@ import ( ) func TestRedactSQLStatements(t *testing.T) { + parser := NewTestParser() sql := "select a,b,c from t where x = 1234 and y = 1234 and z = 'apple'" - redactedSQL, err := RedactSQLQuery(sql) + redactedSQL, err := parser.RedactSQLQuery(sql) if err != nil { t.Fatalf("redacting sql failed: %v", err) } diff --git a/go/vt/sqlparser/rewriter_test.go b/go/vt/sqlparser/rewriter_test.go index 3044e04f8b0..91c925d672f 100644 --- a/go/vt/sqlparser/rewriter_test.go +++ b/go/vt/sqlparser/rewriter_test.go @@ -43,7 +43,8 @@ func BenchmarkVisitLargeExpression(b *testing.B) { func TestReplaceWorksInLaterCalls(t *testing.T) { q := "select * from tbl1" - stmt, err := Parse(q) + parser := NewTestParser() + stmt, err := parser.Parse(q) require.NoError(t, err) count := 0 Rewrite(stmt, func(cursor *Cursor) bool { @@ -67,7 +68,8 @@ func TestReplaceWorksInLaterCalls(t *testing.T) { func TestReplaceAndRevisitWorksInLaterCalls(t *testing.T) { q := "select * from tbl1" - stmt, err := Parse(q) + parser := NewTestParser() + stmt, err := parser.Parse(q) require.NoError(t, err) count := 0 Rewrite(stmt, func(cursor *Cursor) bool { @@ -94,7 +96,8 @@ func TestReplaceAndRevisitWorksInLaterCalls(t *testing.T) { } func TestChangeValueTypeGivesError(t *testing.T) { - parse, err := Parse("select * from a join b on a.id = b.id") + parser := NewTestParser() + parse, err := parser.Parse("select * from a join b on a.id = b.id") require.NoError(t, err) defer func() { diff --git a/go/vt/sqlparser/token.go b/go/vt/sqlparser/token.go index 
2b82e619445..58f575f8642 100644 --- a/go/vt/sqlparser/token.go +++ b/go/vt/sqlparser/token.go @@ -44,18 +44,18 @@ type Tokenizer struct { multi bool specialComment *Tokenizer - Pos int - buf string + Pos int + buf string + parser *Parser } // NewStringTokenizer creates a new Tokenizer for the // sql string. -func NewStringTokenizer(sql string) *Tokenizer { - checkParserVersionFlag() - +func (p *Parser) NewStringTokenizer(sql string) *Tokenizer { return &Tokenizer{ buf: sql, BindVars: make(map[string]struct{}), + parser: p, } } @@ -680,9 +680,9 @@ func (tkn *Tokenizer) scanMySQLSpecificComment() (int, string) { commentVersion, sql := ExtractMysqlComment(tkn.buf[start:tkn.Pos]) - if mySQLParserVersion >= commentVersion { + if tkn.parser.version >= commentVersion { // Only add the special comment to the tokenizer if the version of MySQL is higher or equal to the comment version - tkn.specialComment = NewStringTokenizer(sql) + tkn.specialComment = tkn.parser.NewStringTokenizer(sql) } return tkn.Scan() diff --git a/go/vt/sqlparser/token_test.go b/go/vt/sqlparser/token_test.go index 0fd43b8f86c..b6848d35f06 100644 --- a/go/vt/sqlparser/token_test.go +++ b/go/vt/sqlparser/token_test.go @@ -74,9 +74,10 @@ func TestLiteralID(t *testing.T) { out: "@x @y", }} + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - tkn := NewStringTokenizer(tcase.in) + tkn := parser.NewStringTokenizer(tcase.in) id, out := tkn.Scan() require.Equal(t, tcase.id, id) require.Equal(t, tcase.out, string(out)) @@ -148,9 +149,10 @@ func TestString(t *testing.T) { want: "hello", }} + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - id, got := NewStringTokenizer(tcase.in).Scan() + id, got := parser.NewStringTokenizer(tcase.in).Scan() require.Equal(t, tcase.id, id, "Scan(%q) = (%s), want (%s)", tcase.in, tokenName(id), tokenName(tcase.id)) require.Equal(t, tcase.want, string(got)) }) @@ -193,9 +195,10 @@ func 
TestSplitStatement(t *testing.T) { sql: "", }} + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - sql, rem, err := SplitStatement(tcase.in) + sql, rem, err := parser.SplitStatement(tcase.in) if err != nil { t.Errorf("EndOfStatementPosition(%s): ERROR: %v", tcase.in, err) return @@ -218,27 +221,28 @@ func TestVersion(t *testing.T) { in string id []int }{{ - version: "50709", + version: "5.7.9", in: "/*!80102 SELECT*/ FROM IN EXISTS", id: []int{FROM, IN, EXISTS, 0}, }, { - version: "80101", + version: "8.1.1", in: "/*!80102 SELECT*/ FROM IN EXISTS", id: []int{FROM, IN, EXISTS, 0}, }, { - version: "80201", + version: "8.2.1", in: "/*!80102 SELECT*/ FROM IN EXISTS", id: []int{SELECT, FROM, IN, EXISTS, 0}, }, { - version: "80102", + version: "8.1.2", in: "/*!80102 SELECT*/ FROM IN EXISTS", id: []int{SELECT, FROM, IN, EXISTS, 0}, }} for _, tcase := range testcases { t.Run(tcase.version+"_"+tcase.in, func(t *testing.T) { - mySQLParserVersion = tcase.version - tok := NewStringTokenizer(tcase.in) + parser, err := New(Options{MySQLServerVersion: tcase.version}) + require.NoError(t, err) + tok := parser.NewStringTokenizer(tcase.in) for _, expectedID := range tcase.id { id, _ := tok.Scan() require.Equal(t, expectedID, id) @@ -306,9 +310,10 @@ func TestIntegerAndID(t *testing.T) { out: "3.2", }} + parser := NewTestParser() for _, tcase := range testcases { t.Run(tcase.in, func(t *testing.T) { - tkn := NewStringTokenizer(tcase.in) + tkn := parser.NewStringTokenizer(tcase.in) id, out := tkn.Scan() require.Equal(t, tcase.id, id) expectedOut := tcase.out diff --git a/go/vt/sqlparser/tracked_buffer.go b/go/vt/sqlparser/tracked_buffer.go index aab0c1a1331..aec206f3b3d 100644 --- a/go/vt/sqlparser/tracked_buffer.go +++ b/go/vt/sqlparser/tracked_buffer.go @@ -34,7 +34,7 @@ type NodeFormatter func(buf *TrackedBuffer, node SQLNode) // want to generate a query that's different from the default. 
type TrackedBuffer struct { *strings.Builder - bindLocations []bindLocation + bindLocations []BindLocation nodeFormatter NodeFormatter literal func(string) (int, error) fast bool @@ -288,9 +288,9 @@ func areBothISExpr(op Expr, val Expr) bool { // WriteArg writes a value argument into the buffer along with // tracking information for future substitutions. func (buf *TrackedBuffer) WriteArg(prefix, arg string) { - buf.bindLocations = append(buf.bindLocations, bindLocation{ - offset: buf.Len(), - length: len(prefix) + len(arg), + buf.bindLocations = append(buf.bindLocations, BindLocation{ + Offset: buf.Len(), + Length: len(prefix) + len(arg), }) buf.WriteString(prefix) buf.WriteString(arg) diff --git a/go/vt/sqlparser/tracked_buffer_test.go b/go/vt/sqlparser/tracked_buffer_test.go index 2375441b34e..4dff65634e8 100644 --- a/go/vt/sqlparser/tracked_buffer_test.go +++ b/go/vt/sqlparser/tracked_buffer_test.go @@ -278,16 +278,17 @@ func TestCanonicalOutput(t *testing.T) { }, } + parser := NewTestParser() for _, tc := range testcases { t.Run(tc.input, func(t *testing.T) { - tree, err := Parse(tc.input) + tree, err := parser.Parse(tc.input) require.NoError(t, err, tc.input) out := CanonicalString(tree) require.Equal(t, tc.canonical, out, "bad serialization") // Make sure we've generated a valid query! - rereadStmt, err := Parse(out) + rereadStmt, err := parser.Parse(out) require.NoError(t, err, out) out = CanonicalString(rereadStmt) require.Equal(t, tc.canonical, out, "bad serialization") diff --git a/go/vt/sqlparser/truncate_query.go b/go/vt/sqlparser/truncate_query.go index 4bb63730fd2..3f4231fe8b5 100644 --- a/go/vt/sqlparser/truncate_query.go +++ b/go/vt/sqlparser/truncate_query.go @@ -16,55 +16,14 @@ limitations under the License. package sqlparser -import ( - "github.com/spf13/pflag" - - "vitess.io/vitess/go/vt/servenv" -) - -var ( - // truncateUILen truncate queries in debug UIs to the given length. 0 means unlimited. 
- truncateUILen = 512 - - // truncateErrLen truncate queries in error logs to the given length. 0 means unlimited. - truncateErrLen = 0 -) - const TruncationText = "[TRUNCATED]" -func registerQueryTruncationFlags(fs *pflag.FlagSet) { - fs.IntVar(&truncateUILen, "sql-max-length-ui", truncateUILen, "truncate queries in debug UIs to the given length (default 512)") - fs.IntVar(&truncateErrLen, "sql-max-length-errors", truncateErrLen, "truncate queries in error logs to the given length (default unlimited)") -} - -func init() { - for _, cmd := range []string{ - "vtgate", - "vttablet", - "vtcombo", - "vtctld", - "vtctl", - "vtexplain", - "vtbackup", - "vttestserver", - "vtbench", - } { - servenv.OnParseFor(cmd, registerQueryTruncationFlags) - } -} - // GetTruncateErrLen is a function used to read the value of truncateErrLen -func GetTruncateErrLen() int { - return truncateErrLen -} - -// SetTruncateErrLen is a function used to override the value of truncateErrLen -// It is only meant to be used from tests and not from production code. -func SetTruncateErrLen(errLen int) { - truncateErrLen = errLen +func (p *Parser) GetTruncateErrLen() int { + return p.truncateErrLen } -func truncateQuery(query string, max int) string { +func TruncateQuery(query string, max int) string { sql, comments := SplitMarginComments(query) if max == 0 || len(sql) <= max { @@ -76,13 +35,13 @@ func truncateQuery(query string, max int) string { // TruncateForUI is used when displaying queries on various Vitess status pages // to keep the pages small enough to load and render properly -func TruncateForUI(query string) string { - return truncateQuery(query, truncateUILen) +func (p *Parser) TruncateForUI(query string) string { + return TruncateQuery(query, p.truncateUILen) } // TruncateForLog is used when displaying queries as part of error logs // to avoid overwhelming logging systems with potentially long queries and // bind value data. 
-func TruncateForLog(query string) string { - return truncateQuery(query, truncateErrLen) +func (p *Parser) TruncateForLog(query string) string { + return TruncateQuery(query, p.truncateErrLen) } diff --git a/go/vt/sqlparser/truncate_query_test.go b/go/vt/sqlparser/truncate_query_test.go index e5fc2fc0a9c..c7a2eed4493 100644 --- a/go/vt/sqlparser/truncate_query_test.go +++ b/go/vt/sqlparser/truncate_query_test.go @@ -26,7 +26,7 @@ func TestTruncateQuery(t *testing.T) { } for _, tt := range tests { t.Run(fmt.Sprintf("%s-%d", tt.query, tt.max), func(t *testing.T) { - assert.Equalf(t, tt.want, truncateQuery(tt.query, tt.max), "truncateQuery(%v, %v)", tt.query, tt.max) + assert.Equalf(t, tt.want, TruncateQuery(tt.query, tt.max), "TruncateQuery(%v, %v)", tt.query, tt.max) }) } } diff --git a/go/vt/sqlparser/utils.go b/go/vt/sqlparser/utils.go index 2258eb2fd02..16c3e4ce976 100644 --- a/go/vt/sqlparser/utils.go +++ b/go/vt/sqlparser/utils.go @@ -25,18 +25,18 @@ import ( // QueryMatchesTemplates sees if the given query has the same fingerprint as one of the given templates // (one is enough) -func QueryMatchesTemplates(query string, queryTemplates []string) (match bool, err error) { +func (p *Parser) QueryMatchesTemplates(query string, queryTemplates []string) (match bool, err error) { if len(queryTemplates) == 0 { return false, fmt.Errorf("No templates found") } bv := make(map[string]*querypb.BindVariable) normalize := func(q string) (string, error) { - q, err := NormalizeAlphabetically(q) + q, err := p.NormalizeAlphabetically(q) if err != nil { return "", err } - stmt, reservedVars, err := Parse2(q) + stmt, reservedVars, err := p.Parse2(q) if err != nil { return "", err } @@ -69,8 +69,8 @@ func QueryMatchesTemplates(query string, queryTemplates []string) (match bool, e // NormalizeAlphabetically rewrites given query such that: // - WHERE 'AND' expressions are reordered alphabetically -func NormalizeAlphabetically(query string) (normalized string, err error) { - stmt, 
err := Parse(query) +func (p *Parser) NormalizeAlphabetically(query string) (normalized string, err error) { + stmt, err := p.Parse(query) if err != nil { return normalized, err } @@ -118,12 +118,12 @@ func NormalizeAlphabetically(query string) (normalized string, err error) { // replaces any cases of the provided database name with the // specified replacement name. // Note: both database names provided should be unescaped strings. -func ReplaceTableQualifiers(query, olddb, newdb string) (string, error) { +func (p *Parser) ReplaceTableQualifiers(query, olddb, newdb string) (string, error) { if newdb == olddb { // Nothing to do here. return query, nil } - in, err := Parse(query) + in, err := p.Parse(query) if err != nil { return "", err } diff --git a/go/vt/sqlparser/utils_test.go b/go/vt/sqlparser/utils_test.go index 63c9b10ba43..b2833a8187c 100644 --- a/go/vt/sqlparser/utils_test.go +++ b/go/vt/sqlparser/utils_test.go @@ -47,8 +47,9 @@ func TestNormalizeAlphabetically(t *testing.T) { out: "select * from tbl where b = 4 or a = 3", }} + parser := NewTestParser() for _, tc := range testcases { - normalized, err := NormalizeAlphabetically(tc.in) + normalized, err := parser.NormalizeAlphabetically(tc.in) assert.NoError(t, err) assert.Equal(t, tc.out, normalized) } @@ -173,9 +174,10 @@ func TestQueryMatchesTemplates(t *testing.T) { out: true, }, } + parser := NewTestParser() for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - match, err := QueryMatchesTemplates(tc.q, tc.tmpl) + match, err := parser.QueryMatchesTemplates(tc.q, tc.tmpl) assert.NoError(t, err) assert.Equal(t, tc.out, match) }) @@ -263,9 +265,10 @@ func TestReplaceTableQualifiers(t *testing.T) { out: "set names 'binary'", }, } + parser := NewTestParser() for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := ReplaceTableQualifiers(tt.in, origDB, tt.newdb) + got, err := parser.ReplaceTableQualifiers(tt.in, origDB, tt.newdb) if tt.wantErr { require.Error(t, err) } else 
{ diff --git a/go/vt/srvtopo/resolver.go b/go/vt/srvtopo/resolver.go index a9a1f858ec8..042e291c0a6 100644 --- a/go/vt/srvtopo/resolver.go +++ b/go/vt/srvtopo/resolver.go @@ -83,24 +83,6 @@ type ResolvedShard struct { Gateway Gateway } -// ResolvedShardEqual is an equality check on *ResolvedShard. -func ResolvedShardEqual(rs1, rs2 *ResolvedShard) bool { - return proto.Equal(rs1.Target, rs2.Target) -} - -// ResolvedShardsEqual is an equality check on []*ResolvedShard. -func ResolvedShardsEqual(rss1, rss2 []*ResolvedShard) bool { - if len(rss1) != len(rss2) { - return false - } - for i, rs1 := range rss1 { - if !ResolvedShardEqual(rs1, rss2[i]) { - return false - } - } - return true -} - // WithKeyspace returns a ResolvedShard with a new keyspace keeping other parameters the same func (rs *ResolvedShard) WithKeyspace(newKeyspace string) *ResolvedShard { return &ResolvedShard{ diff --git a/go/vt/throttler/demo/throttler_demo.go b/go/vt/throttler/demo/throttler_demo.go index 126b9098236..b0e8a8d8bb1 100644 --- a/go/vt/throttler/demo/throttler_demo.go +++ b/go/vt/throttler/demo/throttler_demo.go @@ -26,6 +26,10 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -114,9 +118,9 @@ type replica struct { wg sync.WaitGroup } -func newReplica(lagUpdateInterval, degrationInterval, degrationDuration time.Duration, ts *topo.Server) *replica { +func newReplica(lagUpdateInterval, degrationInterval, degrationDuration time.Duration, ts *topo.Server, collationEnv *collations.Environment, parser *sqlparser.Parser) *replica { t := &testing.T{} - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collationEnv, parser) fakeTablet := testlib.NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_REPLICA, 
nil, testlib.TabletKeyspaceShard(t, "ks", "-80")) fakeTablet.StartActionLoop(t, wr) @@ -308,7 +312,16 @@ func main() { log.Infof("start rate set to: %v", rate) ts := memorytopo.NewServer(context.Background(), "cell1") - replica := newReplica(lagUpdateInterval, replicaDegrationInterval, replicaDegrationDuration, ts) + collationEnv := collations.NewEnvironment(servenv.MySQLServerVersion()) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatal(err) + } + replica := newReplica(lagUpdateInterval, replicaDegrationInterval, replicaDegrationDuration, ts, collationEnv, parser) primary := &primary{replica: replica} client := newClient(context.Background(), primary, replica, ts) client.run() diff --git a/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go b/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go deleted file mode 100644 index 1518d7ea8d8..00000000000 --- a/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package grpcthrottlerclient contains the gRPC version of the throttler client protocol. 
-package grpcthrottlerclient - -import ( - "flag" - - "context" - - "google.golang.org/grpc" - - "vitess.io/vitess/go/vt/grpcclient" - "vitess.io/vitess/go/vt/throttler/throttlerclient" - "vitess.io/vitess/go/vt/vterrors" - - throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" - throttlerservicepb "vitess.io/vitess/go/vt/proto/throttlerservice" -) - -var ( - cert = flag.String("throttler_client_grpc_cert", "", "the cert to use to connect") - key = flag.String("throttler_client_grpc_key", "", "the key to use to connect") - ca = flag.String("throttler_client_grpc_ca", "", "the server ca to use to validate servers when connecting") - crl = flag.String("throttler_client_grpc_crl", "", "the server crl to use to validate server certificates when connecting") - name = flag.String("throttler_client_grpc_server_name", "", "the server name to use to validate server certificate") -) - -type client struct { - conn *grpc.ClientConn - gRPCClient throttlerservicepb.ThrottlerClient -} - -func factory(addr string) (throttlerclient.Client, error) { - opt, err := grpcclient.SecureDialOption(*cert, *key, *ca, *crl, *name) - if err != nil { - return nil, err - } - conn, err := grpcclient.Dial(addr, grpcclient.FailFast(false), opt) - if err != nil { - return nil, err - } - gRPCClient := throttlerservicepb.NewThrottlerClient(conn) - - return &client{conn, gRPCClient}, nil -} - -// MaxRates is part of the throttlerclient.Client interface and returns the -// current max rate for each throttler of the process. -func (c *client) MaxRates(ctx context.Context) (map[string]int64, error) { - response, err := c.gRPCClient.MaxRates(ctx, &throttlerdatapb.MaxRatesRequest{}) - if err != nil { - return nil, vterrors.FromGRPC(err) - } - return response.Rates, nil -} - -// SetMaxRate is part of the throttlerclient.Client interface and sets the rate -// on all throttlers of the server. 
-func (c *client) SetMaxRate(ctx context.Context, rate int64) ([]string, error) { - request := &throttlerdatapb.SetMaxRateRequest{ - Rate: rate, - } - - response, err := c.gRPCClient.SetMaxRate(ctx, request) - if err != nil { - return nil, vterrors.FromGRPC(err) - } - return response.Names, nil -} - -// GetConfiguration is part of the throttlerclient.Client interface. -func (c *client) GetConfiguration(ctx context.Context, throttlerName string) (map[string]*throttlerdatapb.Configuration, error) { - response, err := c.gRPCClient.GetConfiguration(ctx, &throttlerdatapb.GetConfigurationRequest{ - ThrottlerName: throttlerName, - }) - if err != nil { - return nil, vterrors.FromGRPC(err) - } - return response.Configurations, nil -} - -// UpdateConfiguration is part of the throttlerclient.Client interface. -func (c *client) UpdateConfiguration(ctx context.Context, throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) { - response, err := c.gRPCClient.UpdateConfiguration(ctx, &throttlerdatapb.UpdateConfigurationRequest{ - ThrottlerName: throttlerName, - Configuration: configuration, - CopyZeroValues: copyZeroValues, - }) - if err != nil { - return nil, vterrors.FromGRPC(err) - } - return response.Names, nil -} - -// ResetConfiguration is part of the throttlerclient.Client interface. -func (c *client) ResetConfiguration(ctx context.Context, throttlerName string) ([]string, error) { - response, err := c.gRPCClient.ResetConfiguration(ctx, &throttlerdatapb.ResetConfigurationRequest{ - ThrottlerName: throttlerName, - }) - if err != nil { - return nil, vterrors.FromGRPC(err) - } - return response.Names, nil -} - -// Close is part of the throttlerclient.Client interface. 
-func (c *client) Close() { - c.conn.Close() -} - -func init() { - throttlerclient.RegisterFactory("grpc", factory) -} diff --git a/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient_test.go b/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient_test.go deleted file mode 100644 index d3ae3c40a33..00000000000 --- a/go/vt/throttler/grpcthrottlerclient/grpcthrottlerclient_test.go +++ /dev/null @@ -1,76 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package grpcthrottlerclient - -import ( - "fmt" - "net" - "testing" - - "google.golang.org/grpc" - - "vitess.io/vitess/go/vt/throttler" - "vitess.io/vitess/go/vt/throttler/grpcthrottlerserver" - "vitess.io/vitess/go/vt/throttler/throttlerclienttest" -) - -// TestThrottlerServer tests the gRPC implementation using a throttler client -// and server. -func TestThrottlerServer(t *testing.T) { - // Use the global manager which is a singleton. - port := startGRPCServer(t, throttler.GlobalManager) - - // Create a ThrottlerClient gRPC client to talk to the throttler. - client, err := factory(fmt.Sprintf("localhost:%v", port)) - if err != nil { - t.Fatalf("Cannot create client: %v", err) - } - defer client.Close() - - throttlerclienttest.TestSuite(t, client) -} - -// TestThrottlerServerPanics tests the panic handling of the gRPC throttler -// server implementation. -func TestThrottlerServerPanics(t *testing.T) { - // For testing the panic handling, use a fake Manager instead. 
- port := startGRPCServer(t, &throttlerclienttest.FakeManager{}) - - // Create a ThrottlerClient gRPC client to talk to the throttler. - client, err := factory(fmt.Sprintf("localhost:%v", port)) - if err != nil { - t.Fatalf("Cannot create client: %v", err) - } - defer client.Close() - - throttlerclienttest.TestSuitePanics(t, client) -} - -func startGRPCServer(t *testing.T, m throttler.Manager) int { - // Listen on a random port. - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatalf("Cannot listen: %v", err) - } - - s := grpc.NewServer() - grpcthrottlerserver.RegisterServer(s, m) - // Call Serve() after our service has been registered. Otherwise, the test - // will fail with the error "grpc: Server.RegisterService after Server.Serve". - go s.Serve(listener) - return listener.Addr().(*net.TCPAddr).Port -} diff --git a/go/vt/throttler/throttlerclient/throttlerclient.go b/go/vt/throttler/throttlerclient/throttlerclient.go deleted file mode 100644 index cf01ccb1239..00000000000 --- a/go/vt/throttler/throttlerclient/throttlerclient.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package throttlerclient defines the generic RPC client interface for the -// throttler service. It has to be implemented for the different RPC frameworks -// e.g. gRPC. 
-package throttlerclient - -import ( - "fmt" - "log" - - "github.com/spf13/pflag" - - "vitess.io/vitess/go/vt/servenv" - - "context" - - throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" -) - -// protocol specifics which RPC client implementation should be used. -var protocol = "grpc" - -func init() { - servenv.OnParseFor("vttablet", registerFlags) -} - -func registerFlags(fs *pflag.FlagSet) { - fs.StringVar(&protocol, "throttler_client_protocol", protocol, "the protocol to use to talk to the integrated throttler service") -} - -// Client defines the generic RPC interface for the throttler service. -type Client interface { - // MaxRates returns the current max rate for each throttler of the process. - MaxRates(ctx context.Context) (map[string]int64, error) - - // SetMaxRate allows to change the current max rate for all throttlers - // of the process. - // It returns the names of the updated throttlers. - SetMaxRate(ctx context.Context, rate int64) ([]string, error) - - // GetConfiguration returns the configuration of the MaxReplicationlag module - // for the given throttler or all throttlers if "throttlerName" is empty. - GetConfiguration(ctx context.Context, throttlerName string) (map[string]*throttlerdatapb.Configuration, error) - - // UpdateConfiguration (partially) updates the configuration of the - // MaxReplicationlag module for the given throttler or all throttlers if - // "throttlerName" is empty. - // If "copyZeroValues" is true, fields with zero values will be copied - // as well. - // The function returns the names of the updated throttlers. - UpdateConfiguration(ctx context.Context, throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) - - // ResetConfiguration resets the configuration of the MaxReplicationlag module - // to the initial configuration for the given throttler or all throttlers if - // "throttlerName" is empty. - // The function returns the names of the updated throttlers. 
- ResetConfiguration(ctx context.Context, throttlerName string) ([]string, error) - - // Close will terminate the connection and free resources. - Close() -} - -// Factory has to be implemented and must create a new RPC client for a given -// "addr". -type Factory func(addr string) (Client, error) - -var factories = make(map[string]Factory) - -// RegisterFactory allows a client implementation to register itself. -func RegisterFactory(name string, factory Factory) { - if _, ok := factories[name]; ok { - log.Fatalf("RegisterFactory: %s already exists", name) - } - factories[name] = factory -} - -// New will return a client for the selected RPC implementation. -func New(addr string) (Client, error) { - factory, ok := factories[protocol] - if !ok { - return nil, fmt.Errorf("unknown throttler client protocol: %v", protocol) - } - return factory(addr) -} diff --git a/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go b/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go deleted file mode 100644 index 38fd9d76286..00000000000 --- a/go/vt/throttler/throttlerclienttest/throttlerclient_testsuite.go +++ /dev/null @@ -1,262 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package throttlerclienttest contains the testsuite against which each -// RPC implementation of the throttlerclient interface must be tested. 
-package throttlerclienttest - -// NOTE: This file is not test-only code because it is referenced by -// tests in other packages and therefore it has to be regularly -// visible. - -// NOTE: This code is in its own package such that its dependencies -// (e.g. zookeeper) won't be drawn into production binaries as well. - -import ( - "reflect" - "strings" - "testing" - - "context" - - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/vt/throttler" - "vitess.io/vitess/go/vt/throttler/throttlerclient" - - throttlerdatapb "vitess.io/vitess/go/vt/proto/throttlerdata" -) - -// TestSuite runs the test suite on the given throttlerclient and throttlerserver. -func TestSuite(t *testing.T, c throttlerclient.Client) { - tf := &testFixture{} - if err := tf.setUp(); err != nil { - t.Fatal(err) - } - defer tf.tearDown() - - tf.maxRates(t, c) - - tf.setMaxRate(t, c) - - tf.configuration(t, c) -} - -// TestSuitePanics tests the panic handling of each RPC method. Unlike TestSuite -// it does not use the real throttler.managerImpl. Instead, it uses FakeManager -// which allows us to panic on each RPC. 
-func TestSuitePanics(t *testing.T, c throttlerclient.Client) { - maxRatesPanics(t, c) - - setMaxRatePanics(t, c) - - getConfigurationPanics(t, c) - - updateConfigurationPanics(t, c) - - resetConfigurationPanics(t, c) -} - -var throttlerNames = []string{"t1", "t2"} - -type testFixture struct { - throttlers []*throttler.Throttler -} - -func (tf *testFixture) setUp() error { - for _, name := range throttlerNames { - t, err := throttler.NewThrottler(name, "TPS", 1 /* threadCount */, 1, throttler.ReplicationLagModuleDisabled) - if err != nil { - return err - } - tf.throttlers = append(tf.throttlers, t) - } - return nil -} - -func (tf *testFixture) tearDown() { - for _, t := range tf.throttlers { - t.Close() - } -} - -func (tf *testFixture) maxRates(t *testing.T, client throttlerclient.Client) { - _, err := client.SetMaxRate(context.Background(), 23) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - - got, err := client.MaxRates(context.Background()) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - want := map[string]int64{ - "t1": 23, - "t2": 23, - } - if !reflect.DeepEqual(got, want) { - t.Fatalf("rate was not updated on all registered throttlers. got = %v, want = %v", got, throttlerNames) - } -} - -func (tf *testFixture) setMaxRate(t *testing.T, client throttlerclient.Client) { - got, err := client.SetMaxRate(context.Background(), 23) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - - if !reflect.DeepEqual(got, throttlerNames) { - t.Fatalf("rate was not updated on all registered throttlers. got = %v, want = %v", got, throttlerNames) - } -} - -func (tf *testFixture) configuration(t *testing.T, client throttlerclient.Client) { - initialConfigs, err := client.GetConfiguration(context.Background(), "" /* all */) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - - // Test UpdateConfiguration. 
- config := &throttlerdatapb.Configuration{ - TargetReplicationLagSec: 1, - MaxReplicationLagSec: 2, - InitialRate: 3, - MaxIncrease: 0.4, - EmergencyDecrease: 0.5, - MinDurationBetweenIncreasesSec: 6, - MaxDurationBetweenIncreasesSec: 7, - MinDurationBetweenDecreasesSec: 8, - SpreadBacklogAcrossSec: 9, - IgnoreNSlowestReplicas: 10, - IgnoreNSlowestRdonlys: 11, - AgeBadRateAfterSec: 12, - BadRateIncrease: 0.13, - MaxRateApproachThreshold: 0.9, - } - names, err := client.UpdateConfiguration(context.Background(), "t2", config /* false */, true /* copyZeroValues */) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - if got, want := names, []string{"t2"}; !reflect.DeepEqual(got, want) { - t.Fatalf("returned names of updated throttlers is wrong. got = %v, want = %v", got, want) - } - - // Test GetConfiguration. - configs, err := client.GetConfiguration(context.Background(), "t2") - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - if len(configs) != 1 || configs["t2"] == nil { - t.Fatalf("wrong named configuration returned. got = %v, want configuration for t2", configs) - } - if got, want := configs["t2"], config; !proto.Equal(got, want) { - t.Fatalf("did not read updated config. got = %v, want = %v", got, want) - } - - // Reset should return the initial configs. - namesForReset, err := client.ResetConfiguration(context.Background(), "" /* all */) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - if got, want := namesForReset, throttlerNames; !reflect.DeepEqual(got, want) { - t.Fatalf("returned names of reset throttlers is wrong. got = %v, want = %v", got, want) - } - - // Verify that it was correctly set. 
- configsAfterReset, err := client.GetConfiguration(context.Background(), "" /* all */) - if err != nil { - t.Fatalf("Cannot execute remote command: %v", err) - } - if got, want := configsAfterReset, initialConfigs; !reflect.DeepEqual(got, want) { - t.Fatalf("wrong configurations after reset. got = %v, want = %v", got, want) - } -} - -// FakeManager implements the throttler.Manager interface and panics on all -// methods defined in the interface. -type FakeManager struct { -} - -const panicMsg = "RPC server implementation should handle this" - -// MaxRates implements the throttler.Manager interface. It always panics. -func (fm *FakeManager) MaxRates() map[string]int64 { - panic(panicMsg) -} - -// SetMaxRate implements the throttler.Manager interface. It always panics. -func (fm *FakeManager) SetMaxRate(int64) []string { - panic(panicMsg) -} - -// GetConfiguration implements the throttler.Manager interface. It always panics. -func (fm *FakeManager) GetConfiguration(throttlerName string) (map[string]*throttlerdatapb.Configuration, error) { - panic(panicMsg) -} - -// UpdateConfiguration implements the throttler.Manager interface. It always panics. -func (fm *FakeManager) UpdateConfiguration(throttlerName string, configuration *throttlerdatapb.Configuration, copyZeroValues bool) ([]string, error) { - panic(panicMsg) -} - -// ResetConfiguration implements the throttler.Manager interface. It always panics. -func (fm *FakeManager) ResetConfiguration(throttlerName string) ([]string, error) { - panic(panicMsg) -} - -// Test methods which test for each RPC that panics are caught. 
- -func maxRatesPanics(t *testing.T, client throttlerclient.Client) { - _, err := client.MaxRates(context.Background()) - if !errorFromPanicHandler(err) { - t.Fatalf("MaxRates RPC implementation does not catch panics properly: %v", err) - } -} - -func setMaxRatePanics(t *testing.T, client throttlerclient.Client) { - _, err := client.SetMaxRate(context.Background(), 23) - if !errorFromPanicHandler(err) { - t.Fatalf("SetMaxRate RPC implementation does not catch panics properly: %v", err) - } -} - -func getConfigurationPanics(t *testing.T, client throttlerclient.Client) { - _, err := client.GetConfiguration(context.Background(), "") - if !errorFromPanicHandler(err) { - t.Fatalf("GetConfiguration RPC implementation does not catch panics properly: %v", err) - } -} - -func updateConfigurationPanics(t *testing.T, client throttlerclient.Client) { - _, err := client.UpdateConfiguration(context.Background(), "", nil, false) - if !errorFromPanicHandler(err) { - t.Fatalf("UpdateConfiguration RPC implementation does not catch panics properly: %v", err) - } -} - -func resetConfigurationPanics(t *testing.T, client throttlerclient.Client) { - _, err := client.ResetConfiguration(context.Background(), "") - if !errorFromPanicHandler(err) { - t.Fatalf("ResetConfiguration RPC implementation does not catch panics properly: %v", err) - } -} - -func errorFromPanicHandler(err error) bool { - if err == nil || !strings.Contains(err.Error(), panicMsg) { - return false - } - return true -} diff --git a/go/vt/tlstest/tlstest_test.go b/go/vt/tlstest/tlstest_test.go index 5c79e45b906..1a6e0ae70ba 100644 --- a/go/vt/tlstest/tlstest_test.go +++ b/go/vt/tlstest/tlstest_test.go @@ -28,6 +28,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "golang.org/x/sync/errgroup" "vitess.io/vitess/go/vt/vttls" ) @@ -89,21 +90,20 @@ func testClientServer(t *testing.T, combineCerts bool) { dialer := new(net.Dialer) dialer.Timeout = 10 * time.Second - wg := sync.WaitGroup{} - // // Positive case: accept 
on server side, connect a client, send data. // - var clientErr error - wg.Add(1) - go func() { - defer wg.Done() - clientConn, clientErr := tls.DialWithDialer(dialer, "tcp", addr, clientConfig) - if clientErr == nil { - _, _ = clientConn.Write([]byte{42}) - clientConn.Close() + var clientEG errgroup.Group + clientEG.Go(func() error { + conn, err := tls.DialWithDialer(dialer, "tcp", addr, clientConfig) + if err != nil { + return err } - }() + + _, _ = conn.Write([]byte{42}) + _ = conn.Close() + return nil + }) serverConn, err := listener.Accept() if err != nil { @@ -119,10 +119,8 @@ func testClientServer(t *testing.T, combineCerts bool) { } serverConn.Close() - wg.Wait() - - if clientErr != nil { - t.Fatalf("Dial failed: %v", clientErr) + if err := clientEG.Wait(); err != nil { + t.Fatalf("client dial failed: %v", err) } // @@ -142,21 +140,23 @@ func testClientServer(t *testing.T, combineCerts bool) { t.Fatalf("TLSClientConfig failed: %v", err) } - var serverErr error - wg.Add(1) - go func() { + var serverEG errgroup.Group + serverEG.Go(func() error { // We expect the Accept to work, but the first read to fail. - defer wg.Done() - serverConn, serverErr := listener.Accept() + conn, err := listener.Accept() + if err != nil { + return err + } + // This will fail. - if serverErr == nil { - result := make([]byte, 1) - if n, err := serverConn.Read(result); err == nil { - fmt.Printf("Was able to read from server: %v\n", n) - } - serverConn.Close() + result := make([]byte, 1) + if n, err := conn.Read(result); err == nil { + return fmt.Errorf("unexpectedly able to read %d bytes from server", n) } - }() + + _ = conn.Close() + return nil + }) // When using TLS 1.2, the Dial will fail. // With TLS 1.3, the Dial will succeed and the first Read will fail. 
@@ -167,9 +167,9 @@ func testClientServer(t *testing.T, combineCerts bool) { } return } - wg.Wait() - if serverErr != nil { - t.Fatalf("Connection failed: %v", serverErr) + + if err := serverEG.Wait(); err != nil { + t.Fatalf("server read failed: %v", err) } data := make([]byte, 1) diff --git a/go/vt/topo/helpers/compare_test.go b/go/vt/topo/helpers/compare_test.go index d31eedee2e9..82924e522f5 100644 --- a/go/vt/topo/helpers/compare_test.go +++ b/go/vt/topo/helpers/compare_test.go @@ -17,9 +17,10 @@ limitations under the License. package helpers import ( + "context" "testing" - "context" + "vitess.io/vitess/go/vt/sqlparser" ) func TestBasicCompare(t *testing.T) { @@ -32,7 +33,7 @@ func TestBasicCompare(t *testing.T) { t.Fatalf("Compare keyspaces is not failing when topos are not in sync") } - CopyKeyspaces(ctx, fromTS, toTS) + CopyKeyspaces(ctx, fromTS, toTS, sqlparser.NewTestParser()) err = CompareKeyspaces(ctx, fromTS, toTS) if err != nil { diff --git a/go/vt/topo/helpers/copy.go b/go/vt/topo/helpers/copy.go index 0df706eba31..6dff1c6ac22 100644 --- a/go/vt/topo/helpers/copy.go +++ b/go/vt/topo/helpers/copy.go @@ -25,6 +25,7 @@ import ( "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtgate/vindexes" @@ -33,7 +34,7 @@ import ( ) // CopyKeyspaces will create the keyspaces in the destination topo. 
-func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server) error { +func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server, parser *sqlparser.Parser) error { keyspaces, err := fromTS.GetKeyspaces(ctx) if err != nil { return fmt.Errorf("GetKeyspaces: %w", err) @@ -57,7 +58,7 @@ func CopyKeyspaces(ctx context.Context, fromTS, toTS *topo.Server) error { vs, err := fromTS.GetVSchema(ctx, keyspace) switch { case err == nil: - _, err = vindexes.BuildKeyspace(vs) + _, err = vindexes.BuildKeyspace(vs, parser) if err != nil { log.Errorf("BuildKeyspace(%v): %v", keyspace, err) break diff --git a/go/vt/topo/helpers/copy_test.go b/go/vt/topo/helpers/copy_test.go index 2086a2e6552..142c6eb49ac 100644 --- a/go/vt/topo/helpers/copy_test.go +++ b/go/vt/topo/helpers/copy_test.go @@ -22,6 +22,8 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -104,7 +106,7 @@ func TestBasic(t *testing.T) { fromTS, toTS := createSetup(ctx, t) // check keyspace copy - CopyKeyspaces(ctx, fromTS, toTS) + CopyKeyspaces(ctx, fromTS, toTS, sqlparser.NewTestParser()) keyspaces, err := toTS.GetKeyspaces(ctx) if err != nil { t.Fatalf("toTS.GetKeyspaces failed: %v", err) @@ -112,7 +114,7 @@ func TestBasic(t *testing.T) { if len(keyspaces) != 1 || keyspaces[0] != "test_keyspace" { t.Fatalf("unexpected keyspaces: %v", keyspaces) } - CopyKeyspaces(ctx, fromTS, toTS) + CopyKeyspaces(ctx, fromTS, toTS, sqlparser.NewTestParser()) // check shard copy CopyShards(ctx, fromTS, toTS) diff --git a/go/vt/topo/helpers/tee_test.go b/go/vt/topo/helpers/tee_test.go index 4dda901c300..1fbba807937 100644 --- a/go/vt/topo/helpers/tee_test.go +++ b/go/vt/topo/helpers/tee_test.go @@ -17,12 +17,13 @@ limitations under the License. 
package helpers import ( + "context" "reflect" "testing" "github.com/stretchr/testify/require" - "context" + "vitess.io/vitess/go/vt/sqlparser" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -32,7 +33,7 @@ func TestTee(t *testing.T) { // create the setup, copy the data fromTS, toTS := createSetup(ctx, t) - CopyKeyspaces(ctx, fromTS, toTS) + CopyKeyspaces(ctx, fromTS, toTS, sqlparser.NewTestParser()) CopyShards(ctx, fromTS, toTS) CopyTablets(ctx, fromTS, toTS) diff --git a/go/vt/vtadmin/api.go b/go/vt/vtadmin/api.go index 92d11ba18ea..28c7aaa9bbe 100644 --- a/go/vt/vtadmin/api.go +++ b/go/vt/vtadmin/api.go @@ -32,6 +32,10 @@ import ( "github.com/gorilla/mux" "github.com/patrickmn/go-cache" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sets" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/concurrency" @@ -77,6 +81,9 @@ type API struct { // vtexplain is now global again due to stat exporters in the tablet layer // we're not super concerned because we will be deleting vtexplain Soon(TM). vtexplainLock sync.Mutex + + collationEnv *collations.Environment + parser *sqlparser.Parser } // Options wraps the configuration options for different components of the @@ -92,7 +99,7 @@ type Options struct { // NewAPI returns a new API, configured to service the given set of clusters, // and configured with the given options. 
-func NewAPI(clusters []*cluster.Cluster, opts Options) *API { +func NewAPI(clusters []*cluster.Cluster, opts Options, collationEnv *collations.Environment, parser *sqlparser.Parser) *API { clusterMap := make(map[string]*cluster.Cluster, len(clusters)) for _, cluster := range clusters { clusterMap[cluster.ID] = cluster @@ -135,9 +142,11 @@ func NewAPI(clusters []*cluster.Cluster, opts Options) *API { } api := &API{ - clusters: clusters, - clusterMap: clusterMap, - authz: authz, + clusters: clusters, + clusterMap: clusterMap, + authz: authz, + collationEnv: collationEnv, + parser: parser, } if opts.EnableDynamicClusters { @@ -292,10 +301,12 @@ func (api *API) WithCluster(c *cluster.Cluster, id string) dynamic.API { defer api.clusterMu.Unlock() dynamicAPI := &API{ - router: api.router, - serv: api.serv, - authz: api.authz, - options: api.options, + router: api.router, + serv: api.serv, + authz: api.authz, + options: api.options, + collationEnv: api.collationEnv, + parser: api.parser, } if c != nil { @@ -2148,7 +2159,7 @@ func (api *API) VTExplain(ctx context.Context, req *vtadminpb.VTExplainRequest) return nil, er.Error() } - vte, err := vtexplain.Init(ctx, srvVSchema, schema, shardMap, &vtexplain.Options{ReplicationMode: "ROW"}) + vte, err := vtexplain.Init(ctx, srvVSchema, schema, shardMap, &vtexplain.Options{ReplicationMode: "ROW"}, api.collationEnv, api.parser) if err != nil { return nil, fmt.Errorf("error initilaizing vtexplain: %w", err) } diff --git a/go/vt/vtadmin/api_authz_test.go b/go/vt/vtadmin/api_authz_test.go index dc524a64ad4..7323f601b25 100644 --- a/go/vt/vtadmin/api_authz_test.go +++ b/go/vt/vtadmin/api_authz_test.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtadmin" "vitess.io/vitess/go/vt/vtadmin/cluster" @@ -66,7 +68,7 @@ func TestCreateKeyspace(t 
*testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -131,7 +133,7 @@ func TestCreateShard(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -198,7 +200,7 @@ func TestDeleteKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -263,7 +265,7 @@ func TestDeleteShards(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -338,7 +340,7 @@ func TestDeleteTablet(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { 
t.Logf("api did not close cleanly: %s", err.Error()) @@ -405,7 +407,7 @@ func TestEmergencyFailoverShard(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -481,7 +483,7 @@ func TestFindSchema(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -502,7 +504,7 @@ func TestFindSchema(t *testing.T) { t.Run("partial access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -522,7 +524,7 @@ func TestFindSchema(t *testing.T) { t.Run("full access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -571,7 +573,7 @@ func TestGetBackups(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -644,7 
+646,7 @@ func TestGetCellInfos(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -723,7 +725,7 @@ func TestGetCellsAliases(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -790,7 +792,7 @@ func TestGetClusters(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -864,7 +866,7 @@ func TestGetGates(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -883,7 +885,7 @@ func TestGetGates(t *testing.T) { t.Run("partial access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -903,7 +905,7 @@ func 
TestGetGates(t *testing.T) { t.Run("full access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -944,7 +946,7 @@ func TestGetKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1011,7 +1013,7 @@ func TestGetKeyspaces(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1095,7 +1097,7 @@ func TestGetSchema(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1118,7 +1120,7 @@ func TestGetSchema(t *testing.T) { t.Run("authorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1171,7 +1173,7 @@ func TestGetSchemas(t *testing.T) { t.Run("unauthorized actor", func(t 
*testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1190,7 +1192,7 @@ func TestGetSchemas(t *testing.T) { t.Run("partial access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1216,7 +1218,7 @@ func TestGetSchemas(t *testing.T) { t.Run("full access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1269,7 +1271,7 @@ func TestGetShardReplicationPositions(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1350,7 +1352,7 @@ func TestGetSrvVSchema(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1417,7 +1419,7 @@ func TestGetSrvVSchemas(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify 
authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1498,7 +1500,7 @@ func TestGetTablet(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1569,7 +1571,7 @@ func TestGetTablets(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1650,7 +1652,7 @@ func TestGetVSchema(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1717,7 +1719,7 @@ func TestGetVSchemas(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1807,7 +1809,7 @@ func 
TestGetVtctlds(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1826,7 +1828,7 @@ func TestGetVtctlds(t *testing.T) { t.Run("partial access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1846,7 +1848,7 @@ func TestGetVtctlds(t *testing.T) { t.Run("full access", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1887,7 +1889,7 @@ func TestGetWorkflow(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -1956,7 +1958,7 @@ func TestGetWorkflows(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2023,7 +2025,7 @@ func TestPingTablet(t *testing.T) { err := opts.RBAC.Reify() 
require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2088,7 +2090,7 @@ func TestPlannedFailoverShard(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2155,7 +2157,7 @@ func TestRefreshState(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2220,7 +2222,7 @@ func TestRefreshTabletReplicationSource(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2291,7 +2293,7 @@ func TestReloadSchemas(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { 
t.Logf("api did not close cleanly: %s", err.Error()) @@ -2371,7 +2373,7 @@ func TestRunHealthCheck(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2436,7 +2438,7 @@ func TestSetReadOnly(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2501,7 +2503,7 @@ func TestSetReadWrite(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2566,7 +2568,7 @@ func TestStartReplication(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2631,7 +2633,7 @@ func TestStopReplication(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), 
opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2696,7 +2698,7 @@ func TestTabletExternallyPromoted(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2764,7 +2766,7 @@ func TestVTExplain(t *testing.T) { t.Run("unauthorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2786,7 +2788,7 @@ func TestVTExplain(t *testing.T) { t.Run("authorized actor", func(t *testing.T) { t.Parallel() - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2829,7 +2831,7 @@ func TestValidateKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2890,7 +2892,7 @@ func TestValidateSchemaKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := 
vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -2951,7 +2953,7 @@ func TestValidateVersionKeyspace(t *testing.T) { err := opts.RBAC.Reify() require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) diff --git a/go/vt/vtadmin/api_test.go b/go/vt/vtadmin/api_test.go index 4a68abd6b73..c7020bd4e20 100644 --- a/go/vt/vtadmin/api_test.go +++ b/go/vt/vtadmin/api_test.go @@ -32,6 +32,10 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/mysql/collations" + _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/topo" @@ -555,7 +559,7 @@ func TestFindSchema(t *testing.T) { clusters[i] = vtadmintestutil.BuildCluster(t, cfg) } - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) defer api.Close() resp, err := api.FindSchema(ctx, tt.req) @@ -765,7 +769,7 @@ func TestFindSchema(t *testing.T) { }, ) - api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}) + api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) defer api.Close() schema, err := api.FindSchema(ctx, &vtadminpb.FindSchemaRequest{ @@ -865,7 +869,7 @@ func TestGetClusters(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - api := NewAPI(tt.clusters, Options{}) + api := NewAPI(tt.clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetClusters(ctx, &vtadminpb.GetClustersRequest{}) assert.NoError(t, err) 
@@ -943,7 +947,7 @@ func TestGetGates(t *testing.T) { }, } - api := NewAPI([]*cluster.Cluster{cluster1, cluster2}, Options{}) + api := NewAPI([]*cluster.Cluster{cluster1, cluster2}, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) ctx := context.Background() resp, err := api.GetGates(ctx, &vtadminpb.GetGatesRequest{}) @@ -1065,7 +1069,7 @@ func TestGetKeyspace(t *testing.T) { testutil.AddShards(ctx, t, ts, shards...) topos[i] = ts vtctlds[i] = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) } @@ -1081,7 +1085,7 @@ func TestGetKeyspace(t *testing.T) { }) } - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) ks, err := api.GetKeyspace(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -1309,10 +1313,10 @@ func TestGetKeyspaces(t *testing.T) { servers := []vtctlservicepb.VtctldServer{ testutil.NewVtctldServerWithTabletManagerClient(t, topos[0], nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }), testutil.NewVtctldServerWithTabletManagerClient(t, topos[1], nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }), } @@ -1334,7 +1338,7 @@ func TestGetKeyspaces(t *testing.T) { }), } - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetKeyspaces(ctx, tt.req) require.NoError(t, err) @@ -1544,7 +1548,7 @@ func TestGetSchema(t *testing.T) { defer cancel() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer 
{ - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddTablets(ctx, t, tt.ts, nil, vtadmintestutil.TopodataTabletsFromVTAdminTablets(tt.tablets)...) @@ -1558,7 +1562,7 @@ func TestGetSchema(t *testing.T) { VtctldClient: client, Tablets: tt.tablets, }) - api := NewAPI([]*cluster.Cluster{c}, Options{}) + api := NewAPI([]*cluster.Cluster{c}, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) defer api.Close() resp, err := api.GetSchema(ctx, tt.req) @@ -1688,7 +1692,7 @@ func TestGetSchema(t *testing.T) { }, ) - api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}) + api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) defer api.Close() schema, err := api.GetSchema(ctx, &vtadminpb.GetSchemaRequest{ @@ -2198,10 +2202,10 @@ func TestGetSchemas(t *testing.T) { vtctlds := []vtctlservicepb.VtctldServer{ testutil.NewVtctldServerWithTabletManagerClient(t, topos[0], &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }), testutil.NewVtctldServerWithTabletManagerClient(t, topos[1], &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }), } @@ -2242,7 +2246,7 @@ func TestGetSchemas(t *testing.T) { }) } - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) defer api.Close() resp, err := api.GetSchemas(ctx, tt.req) @@ -2463,7 +2467,7 @@ func TestGetSchemas(t *testing.T) { }, ) - api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}) + api := NewAPI([]*cluster.Cluster{c1, c2}, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) defer api.Close() resp, err := api.GetSchemas(context.Background(), &vtadminpb.GetSchemasRequest{ @@ 
-2637,7 +2641,7 @@ func TestGetSrvKeyspace(t *testing.T) { toposerver := memorytopo.NewServer(ctx, tt.cells...) vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { @@ -2656,7 +2660,7 @@ func TestGetSrvKeyspace(t *testing.T) { }), } - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetSrvKeyspace(ctx, tt.req) if tt.shouldErr { @@ -2801,7 +2805,7 @@ func TestGetSrvKeyspaces(t *testing.T) { } vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { @@ -2822,7 +2826,7 @@ func TestGetSrvKeyspaces(t *testing.T) { }), } - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetSrvKeyspaces(ctx, tt.req) if tt.shouldErr { @@ -2966,7 +2970,7 @@ func TestGetSrvVSchema(t *testing.T) { toposerver := memorytopo.NewServer(ctx, tt.cells...) 
vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { @@ -2985,7 +2989,7 @@ func TestGetSrvVSchema(t *testing.T) { }), } - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetSrvVSchema(ctx, tt.req) if tt.shouldErr { @@ -3260,7 +3264,7 @@ func TestGetSrvVSchemas(t *testing.T) { toposerver := memorytopo.NewServer(ctx, tt.cells...) vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctldserver, func(t *testing.T, vtctldClient vtctldclient.VtctldClient) { @@ -3279,7 +3283,7 @@ func TestGetSrvVSchemas(t *testing.T) { }), } - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetSrvVSchemas(ctx, tt.req) if tt.shouldErr { @@ -3550,7 +3554,7 @@ func TestGetTablet(t *testing.T) { }) } - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetTablet(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -3745,7 +3749,7 @@ func TestGetTablets(t *testing.T) { }) } - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetTablets(ctx, tt.req) if tt.shouldErr { assert.Error(t, err) @@ -3876,7 +3880,7 @@ func TestGetVSchema(t *testing.T) { t.Parallel() clusters := 
[]*cluster.Cluster{vtadmintestutil.BuildCluster(t, tt.clusterCfg)} - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetVSchema(ctx, tt.req) if tt.shouldErr { @@ -4206,7 +4210,7 @@ func TestGetVSchemas(t *testing.T) { } clusters := vtadmintestutil.BuildClusters(t, tt.clusterCfgs...) - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetVSchemas(ctx, tt.req) if tt.shouldErr { @@ -4290,7 +4294,7 @@ func TestGetVtctlds(t *testing.T) { }, } - api := NewAPI([]*cluster.Cluster{cluster1, cluster2}, Options{}) + api := NewAPI([]*cluster.Cluster{cluster1, cluster2}, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) ctx := context.Background() resp, err := api.GetVtctlds(ctx, &vtadminpb.GetVtctldsRequest{}) @@ -4422,7 +4426,7 @@ func TestGetWorkflow(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - api := NewAPI(vtadmintestutil.BuildClusters(t, tt.cfgs...), Options{}) + api := NewAPI(vtadmintestutil.BuildClusters(t, tt.cfgs...), Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetWorkflow(ctx, tt.req) if tt.shouldErr { @@ -4861,7 +4865,7 @@ func TestGetWorkflows(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - api := NewAPI(vtadmintestutil.BuildClusters(t, tt.cfgs...), Options{}) + api := NewAPI(vtadmintestutil.BuildClusters(t, tt.cfgs...), Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.GetWorkflows(ctx, tt.req) if tt.shouldErr { @@ -5112,7 +5116,7 @@ func TestVTExplain(t *testing.T) { } vtctldserver := testutil.NewVtctldServerWithTabletManagerClient(t, toposerver, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctldserver, func(t *testing.T, 
vtctldClient vtctldclient.VtctldClient) { @@ -5151,7 +5155,7 @@ func TestVTExplain(t *testing.T) { }), } - api := NewAPI(clusters, Options{}) + api := NewAPI(clusters, Options{}, collations.MySQL8(), sqlparser.NewTestParser()) resp, err := api.VTExplain(ctx, tt.req) if tt.expectedError != nil { @@ -5353,7 +5357,7 @@ func TestServeHTTP(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - api := NewAPI(tt.clusters, Options{EnableDynamicClusters: tt.enableDynamicClusters}) + api := NewAPI(tt.clusters, Options{EnableDynamicClusters: tt.enableDynamicClusters}, collations.MySQL8(), sqlparser.NewTestParser()) // Copy the Cookie over to a new Request req := httptest.NewRequest(http.MethodGet, "/api/clusters", nil) diff --git a/go/vt/vtadmin/testutil/authztestgen/template.go b/go/vt/vtadmin/testutil/authztestgen/template.go index f1ee272c373..66d12e77b0b 100644 --- a/go/vt/vtadmin/testutil/authztestgen/template.go +++ b/go/vt/vtadmin/testutil/authztestgen/template.go @@ -45,6 +45,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtadmin" "vitess.io/vitess/go/vt/vtadmin/cluster" @@ -88,7 +90,7 @@ func Test{{ .Method }}(t *testing.T) { require.NoError(t, err, "failed to reify authorization rules: %+v", opts.RBAC.Rules) {{ if not .SerializeCases }} - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) @@ -101,7 +103,7 @@ func Test{{ .Method }}(t *testing.T) { t.Run("{{ .Name }}", func(t *testing.T) { t.Parallel() {{ if $test.SerializeCases }} - api := vtadmin.NewAPI(testClusters(t), opts) + api := vtadmin.NewAPI(testClusters(t), opts, collations.MySQL8(), sqlparser.NewTestParser()) 
t.Cleanup(func() { if err := api.Close(); err != nil { t.Logf("api did not close cleanly: %s", err.Error()) diff --git a/go/vt/vtadmin/testutil/cluster.go b/go/vt/vtadmin/testutil/cluster.go index 9141d6b0c22..ca9dfe00dac 100644 --- a/go/vt/vtadmin/testutil/cluster.go +++ b/go/vt/vtadmin/testutil/cluster.go @@ -27,6 +27,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -169,7 +171,7 @@ func BuildIntegrationTestCluster(t testing.TB, ctx context.Context, c *vtadminpb ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := grpcvtctldtestutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) localclient := localvtctldclient.New(vtctld) diff --git a/go/vt/vtcombo/tablet_map.go b/go/vt/vtcombo/tablet_map.go index c83fd562731..4a2aa7ba411 100644 --- a/go/vt/vtcombo/tablet_map.go +++ b/go/vt/vtcombo/tablet_map.go @@ -23,6 +23,7 @@ import ( "path" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/grpcclient" @@ -31,6 +32,7 @@ import ( "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" @@ -82,6 +84,8 @@ func CreateTablet( tabletType topodatapb.TabletType, mysqld mysqlctl.MysqlDaemon, dbcfgs *dbconfigs.DBConfigs, + collationEnv *collations.Environment, + parser *sqlparser.Parser, ) error { alias := &topodatapb.TabletAlias{ Cell: cell, @@ -89,7 +93,7 @@ func CreateTablet( } log.Infof("Creating %v tablet %v for %v/%v", tabletType, 
topoproto.TabletAliasString(alias), keyspace, shard) - controller := tabletserver.NewServer(ctx, topoproto.TabletAliasString(alias), ts, alias) + controller := tabletserver.NewServer(ctx, topoproto.TabletAliasString(alias), ts, alias, collationEnv, parser) initTabletType := tabletType if tabletType == topodatapb.TabletType_PRIMARY { initTabletType = topodatapb.TabletType_REPLICA @@ -104,6 +108,8 @@ func CreateTablet( MysqlDaemon: mysqld, DBConfigs: dbcfgs, QueryServiceControl: controller, + CollationEnv: collationEnv, + SQLParser: parser, } tablet := &topodatapb.Tablet{ Alias: alias, @@ -169,6 +175,8 @@ func InitTabletMap( dbcfgs *dbconfigs.DBConfigs, schemaDir string, ensureDatabase bool, + collationEnv *collations.Environment, + parser *sqlparser.Parser, ) (uint32, error) { tabletMap = make(map[uint32]*comboTablet) @@ -184,11 +192,11 @@ func InitTabletMap( }) // iterate through the keyspaces - wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil, collationEnv, parser) var uid uint32 = 1 for _, kpb := range tpb.Keyspaces { var err error - uid, err = CreateKs(ctx, ts, tpb, mysqld, dbcfgs, schemaDir, kpb, ensureDatabase, uid, wr) + uid, err = CreateKs(ctx, ts, tpb, mysqld, dbcfgs, schemaDir, kpb, ensureDatabase, uid, wr, collationEnv, parser) if err != nil { return 0, err } @@ -288,6 +296,8 @@ func CreateKs( ensureDatabase bool, uid uint32, wr *wrangler.Wrangler, + collationEnv *collations.Environment, + parser *sqlparser.Parser, ) (uint32, error) { keyspace := kpb.Name @@ -337,7 +347,7 @@ func CreateKs( replicas-- // create the primary - if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_PRIMARY, mysqld, dbcfgs.Clone()); err != nil { + if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_PRIMARY, mysqld, dbcfgs.Clone(), collationEnv, parser); err != nil { return 0, err } uid++ @@ -345,7 +355,7 @@ func CreateKs( for i := 0; i < 
replicas; i++ { // create a replica tablet - if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_REPLICA, mysqld, dbcfgs.Clone()); err != nil { + if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_REPLICA, mysqld, dbcfgs.Clone(), collationEnv, parser); err != nil { return 0, err } uid++ @@ -353,7 +363,7 @@ func CreateKs( for i := 0; i < rdonlys; i++ { // create a rdonly tablet - if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_RDONLY, mysqld, dbcfgs.Clone()); err != nil { + if err := CreateTablet(ctx, ts, cell, uid, keyspace, shard, dbname, topodatapb.TabletType_RDONLY, mysqld, dbcfgs.Clone(), collationEnv, parser); err != nil { return 0, err } uid++ @@ -371,7 +381,7 @@ func CreateKs( return 0, fmt.Errorf("cannot load vschema file %v for keyspace %v: %v", f, keyspace, err) } - _, err = vindexes.BuildKeyspace(formal) + _, err = vindexes.BuildKeyspace(formal, wr.SQLParser()) if err != nil { return 0, fmt.Errorf("BuildKeyspace(%v) failed: %v", keyspace, err) } diff --git a/go/vt/vtctl/endtoend/get_schema_test.go b/go/vt/vtctl/endtoend/get_schema_test.go index 2373fb6e3a5..2475d92f150 100644 --- a/go/vt/vtctl/endtoend/get_schema_test.go +++ b/go/vt/vtctl/endtoend/get_schema_test.go @@ -4,7 +4,9 @@ import ( "context" "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/sqlparser" "github.com/google/uuid" "github.com/stretchr/testify/assert" @@ -161,7 +163,7 @@ func TestGetSchema(t *testing.T) { logger := logutil.NewMemoryLogger() - err := vtctl.RunCommand(ctx, wrangler.New(logger, topo, &tmc), []string{ + err := vtctl.RunCommand(ctx, wrangler.New(logger, topo, &tmc, collations.MySQL8(), sqlparser.NewTestParser()), []string{ "GetSchema", topoproto.TabletAliasString(tablet.Alias), }) @@ -201,7 +203,7 @@ func TestGetSchema(t *testing.T) { }, } - err = vtctl.RunCommand(ctx, wrangler.New(logger, topo, 
&tmc), []string{ + err = vtctl.RunCommand(ctx, wrangler.New(logger, topo, &tmc, collations.MySQL8(), sqlparser.NewTestParser()), []string{ "GetSchema", "--table_sizes_only", topoproto.TabletAliasString(tablet.Alias), diff --git a/go/vt/vtctl/endtoend/onlineddl_show_test.go b/go/vt/vtctl/endtoend/onlineddl_show_test.go index fe795af752d..6d94ab22bd6 100644 --- a/go/vt/vtctl/endtoend/onlineddl_show_test.go +++ b/go/vt/vtctl/endtoend/onlineddl_show_test.go @@ -9,6 +9,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl" @@ -119,7 +123,7 @@ func onlineDDLTest(t *testing.T, args []string, expectedQuery string) { tmclienttest.SetProtocol("go.vt.vtctl.endtoend", t.Name()) logger := logutil.NewMemoryLogger() - wr := wrangler.New(logger, fakeTopo, &tmc) + wr := wrangler.New(logger, fakeTopo, &tmc, collations.MySQL8(), sqlparser.NewTestParser()) err := vtctl.RunCommand(ctx, wr, args) assert.Error(t, err) diff --git a/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go b/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go deleted file mode 100644 index 14147316508..00000000000 --- a/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient.go +++ /dev/null @@ -1,154 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package fakevtctlclient - -import ( - "fmt" - "io" - "strings" - "sync" - "time" - - "vitess.io/vitess/go/protoutil" - "vitess.io/vitess/go/vt/logutil" - - logutilpb "vitess.io/vitess/go/vt/proto/logutil" -) - -// FakeLoggerEventStreamingClient is the base for the fakes for vtctlclient. -// It allows to register a (multi-)line string for a given command and return the result as channel which streams it back. -type FakeLoggerEventStreamingClient struct { - results map[string]*result - // mu guards all fields of the structs. - mu sync.Mutex -} - -// NewFakeLoggerEventStreamingClient creates a new fake. -func NewFakeLoggerEventStreamingClient() *FakeLoggerEventStreamingClient { - return &FakeLoggerEventStreamingClient{results: make(map[string]*result)} -} - -// generateKey returns a map key for a []string. -// ([]string is not supported as map key.) -func generateKey(args []string) string { - return strings.Join(args, " ") -} - -// result contains the result the fake should respond for a given command. -type result struct { - output string - err error - // count is the number of times this result is registered for the same - // command. With each stream of this result, count will be decreased by one. - count int - // addr optionally specifies which server address is expected from the client. - addr string -} - -func (r1 result) Equals(r2 result) bool { - return r1.output == r2.output && - ((r1.err == nil && r2.err == nil) || - (r1.err != nil && r2.err != nil && r1.err.Error() == r2.err.Error())) -} - -// RegisterResult registers for a given command (args) the result which the fake should return. -// Once the result was returned, it will be automatically deregistered. 
-func (f *FakeLoggerEventStreamingClient) RegisterResult(args []string, output string, err error) error { - return f.RegisterResultForAddr("" /* addr */, args, output, err) -} - -// RegisterResultForAddr is identical to RegisterResult but also expects that -// the client did dial "addr" as server address. -func (f *FakeLoggerEventStreamingClient) RegisterResultForAddr(addr string, args []string, output string, err error) error { - f.mu.Lock() - defer f.mu.Unlock() - - k := generateKey(args) - v := result{output, err, 1, addr} - if result, ok := f.results[k]; ok { - if result.Equals(v) { - result.count++ - return nil - } - return fmt.Errorf("a different result (%v) is already registered for command: %v", result, args) - } - f.results[k] = &v - return nil -} - -// RegisteredCommands returns a list of commands which are currently registered. -// This is useful to check that all registered results have been consumed. -func (f *FakeLoggerEventStreamingClient) RegisteredCommands() []string { - f.mu.Lock() - defer f.mu.Unlock() - - var commands []string - for k := range f.results { - commands = append(commands, k) - } - return commands -} - -type streamResultAdapter struct { - lines []string - index int - err error -} - -func (s *streamResultAdapter) Recv() (*logutilpb.Event, error) { - if s.index < len(s.lines) { - result := &logutilpb.Event{ - Time: protoutil.TimeToProto(time.Now()), - Level: logutilpb.Level_CONSOLE, - File: "fakevtctlclient", - Line: -1, - Value: s.lines[s.index], - } - s.index++ - return result, nil - } - if s.err == nil { - return nil, io.EOF - } - return nil, s.err -} - -// StreamResult returns an EventStream which streams back a registered result as logging events. -// "addr" is the server address which the client dialed and may be empty. 
-func (f *FakeLoggerEventStreamingClient) StreamResult(addr string, args []string) (logutil.EventStream, error) { - f.mu.Lock() - defer f.mu.Unlock() - - k := generateKey(args) - result, ok := f.results[k] - if !ok { - return nil, fmt.Errorf("no response was registered for args: %v", args) - } - if result.addr != "" && addr != result.addr { - return nil, fmt.Errorf("client sent request to wrong server address. got: %v want: %v", addr, result.addr) - } - result.count-- - if result.count == 0 { - delete(f.results, k) - } - - return &streamResultAdapter{ - lines: strings.Split(result.output, "\n"), - index: 0, - err: result.err, - }, nil -} diff --git a/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient_test.go b/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient_test.go deleted file mode 100644 index 04a0ad5e03d..00000000000 --- a/go/vt/vtctl/fakevtctlclient/fake_loggerevent_streamingclient_test.go +++ /dev/null @@ -1,185 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package fakevtctlclient - -import ( - "errors" - "io" - "reflect" - "strings" - "testing" - - logutilpb "vitess.io/vitess/go/vt/proto/logutil" -) - -func TestStreamOutputAndError(t *testing.T) { - fake := NewFakeLoggerEventStreamingClient() - args := []string{"CopySchemaShard", "test_keyspace/0", "test_keyspace/2"} - output := []string{"event1", "event2"} - wantErr := errors.New("something went wrong") - - err := fake.RegisterResult(args, strings.Join(output, "\n"), wantErr) - if err != nil { - t.Fatalf("Failed to register fake result for: %v err: %v", args, err) - } - - verifyStreamOutputAndError(t, fake, "" /* addr */, args, output, wantErr) -} - -func TestStreamOutput(t *testing.T) { - fake := NewFakeLoggerEventStreamingClient() - args := []string{"CopySchemaShard", "test_keyspace/0", "test_keyspace/2"} - output := []string{"event1", "event2"} - var wantErr error - - err := fake.RegisterResult(args, strings.Join(output, "\n"), wantErr) - if err != nil { - t.Fatalf("Failed to register fake result for: %v err: %v", args, err) - } - - verifyStreamOutputAndError(t, fake, "" /* addr */, args, output, wantErr) -} - -// TestStreamOutputForAddr is similar to TestStreamOutput but also tests that -// the correct server address was used by the client. -func TestStreamOutputForAddr(t *testing.T) { - fake := NewFakeLoggerEventStreamingClient() - addr := "localhost:12345" - args := []string{"CopySchemaShard", "test_keyspace/0", "test_keyspace/2"} - output := []string{"event1", "event2"} - var wantErr error - - // Used address matches. - err := fake.RegisterResultForAddr(addr, args, strings.Join(output, "\n"), wantErr) - if err != nil { - t.Fatalf("Failed to register fake result for: %v err: %v", args, err) - } - verifyStreamOutputAndError(t, fake, addr, args, output, wantErr) - - // Used address does not match. 
- err = fake.RegisterResultForAddr(addr, args, strings.Join(output, "\n"), wantErr) - if err != nil { - t.Fatalf("Failed to register fake result for: %v err: %v", args, err) - } - _, err = fake.StreamResult("different-addr", args) - if err == nil || !strings.Contains(err.Error(), "client sent request to wrong server address") { - t.Fatalf("fake should have failed because the client used the wrong address: %v", err) - } -} - -func verifyStreamOutputAndError(t *testing.T, fake *FakeLoggerEventStreamingClient, addr string, args, output []string, wantErr error) { - stream, err := fake.StreamResult(addr, args) - if err != nil { - t.Fatalf("Failed to stream result: %v", err) - } - - // Verify output and error. - i := 0 - for { - var event *logutilpb.Event - event, err = stream.Recv() - if err != nil { - break - } - if i > len(output) { - t.Fatalf("Received more events than expected. got: %v want: %v", i, len(output)) - } - if event.Value != output[i] { - t.Errorf("Received event is not identical to the received one. got: %v want: %v", event.Value, output[i]) - } - t.Logf("Received event: %v", event) - i++ - } - if i != len(output) { - t.Errorf("Number of received events mismatches. got: %v want: %v", i, len(output)) - } - if err == io.EOF { - err = nil - } - if err != wantErr { - t.Errorf("Wrong error received. got: %v want: %v", err, wantErr) - } -} - -func TestNoResultRegistered(t *testing.T) { - fake := NewFakeLoggerEventStreamingClient() - stream, err := fake.StreamResult("" /* addr */, []string{"ListShardTablets", "test_keyspace/0"}) - if stream != nil { - t.Fatalf("No stream should have been returned because no matching result is registered.") - } - wantErr := "no response was registered for args: [ListShardTablets test_keyspace/0]" - if err.Error() != wantErr { - t.Errorf("Wrong error for missing result was returned. 
got: '%v' want: '%v'", err, wantErr) - } -} - -func TestResultAlreadyRegistered(t *testing.T) { - fake := NewFakeLoggerEventStreamingClient() - errFirst := fake.RegisterResult([]string{"ListShardTablets", "test_keyspace/0"}, "output1", nil) - if errFirst != nil { - t.Fatalf("Registering the result should have been successful. Error: %v", errFirst) - } - - errSecond := fake.RegisterResult([]string{"ListShardTablets", "test_keyspace/0"}, "output2", nil) - if errSecond == nil { - t.Fatal("Registering a duplicate, different result should not have been successful.") - } - want := ") is already registered for command: " - if !strings.Contains(errSecond.Error(), want) { - t.Fatalf("Wrong error message: got: '%v' want: '%v'", errSecond, want) - } -} - -func TestRegisterMultipleResultsForSameCommand(t *testing.T) { - fake := NewFakeLoggerEventStreamingClient() - args := []string{"CopySchemaShard", "test_keyspace/0", "test_keyspace/2"} - output := []string{"event1", "event2"} - var wantErr error - - // Register first result. - err := fake.RegisterResult(args, strings.Join(output, "\n"), wantErr) - if err != nil { - t.Fatalf("Failed to register fake result for: %v err: %v", args, err) - } - registeredCommands := []string{strings.Join(args, " ")} - verifyListOfRegisteredCommands(t, fake, registeredCommands) - - // Register second result. - err = fake.RegisterResult(args, strings.Join(output, "\n"), wantErr) - if err != nil { - t.Fatalf("Failed to register fake result for: %v err: %v", args, err) - } - verifyListOfRegisteredCommands(t, fake, registeredCommands) - - // Consume first result. - verifyStreamOutputAndError(t, fake, "" /* addr */, args, output, wantErr) - verifyListOfRegisteredCommands(t, fake, registeredCommands) - - // Consume second result. 
- verifyStreamOutputAndError(t, fake, "" /* addr */, args, output, wantErr) - verifyListOfRegisteredCommands(t, fake, []string{}) -} - -func verifyListOfRegisteredCommands(t *testing.T, fake *FakeLoggerEventStreamingClient, want []string) { - got := fake.RegisteredCommands() - if len(got) == 0 && len(want) == 0 { - return - } - if !reflect.DeepEqual(got, want) { - t.Fatalf("fake.RegisteredCommands() = %v, want: %v", got, want) - } -} diff --git a/go/vt/vtctl/fakevtctlclient/fakevtctlclient.go b/go/vt/vtctl/fakevtctlclient/fakevtctlclient.go deleted file mode 100644 index 11224b745e9..00000000000 --- a/go/vt/vtctl/fakevtctlclient/fakevtctlclient.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2019 The Vitess Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package fakevtctlclient contains a fake for the vtctlclient interface. -package fakevtctlclient - -import ( - "time" - - "context" - - "vitess.io/vitess/go/vt/logutil" - "vitess.io/vitess/go/vt/vtctl/vtctlclient" -) - -// FakeVtctlClient is a fake which implements the vtctlclient interface. -// The fake can be used to return a specific result for a given command. -// If the command is not registered, an error will be thrown. -type FakeVtctlClient struct { - *FakeLoggerEventStreamingClient -} - -// NewFakeVtctlClient creates a FakeVtctlClient struct. 
-func NewFakeVtctlClient() *FakeVtctlClient { - return &FakeVtctlClient{NewFakeLoggerEventStreamingClient()} -} - -// FakeVtctlClientFactory always returns the current instance. -func (f *FakeVtctlClient) FakeVtctlClientFactory(addr string) (vtctlclient.VtctlClient, error) { - return f, nil -} - -// ExecuteVtctlCommand is part of the vtctlclient interface. -func (f *FakeVtctlClient) ExecuteVtctlCommand(ctx context.Context, args []string, actionTimeout time.Duration) (logutil.EventStream, error) { - return f.FakeLoggerEventStreamingClient.StreamResult("" /* addr */, args) -} - -// Close is part of the vtctlclient interface. -func (f *FakeVtctlClient) Close() {} diff --git a/go/vt/vtctl/grpcvtctlclient/client_test.go b/go/vt/vtctl/grpcvtctlclient/client_test.go index 50e1968533e..5349e2d1a3b 100644 --- a/go/vt/vtctl/grpcvtctlclient/client_test.go +++ b/go/vt/vtctl/grpcvtctlclient/client_test.go @@ -28,6 +28,10 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtctl/grpcvtctlserver" @@ -52,7 +56,7 @@ func TestVtctlServer(t *testing.T) { // Create a gRPC server and listen on the port server := grpc.NewServer() - vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(ts)) + vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(ts, collations.MySQL8(), sqlparser.NewTestParser())) go server.Serve(listener) // Create a VtctlClient gRPC client to talk to the fake server @@ -86,7 +90,7 @@ func TestVtctlAuthClient(t *testing.T) { opts = append(opts, grpc.UnaryInterceptor(servenv.FakeAuthUnaryInterceptor)) server := grpc.NewServer(opts...) 
- vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(ts)) + vtctlservicepb.RegisterVtctlServer(server, grpcvtctlserver.NewVtctlServer(ts, collations.MySQL8(), sqlparser.NewTestParser())) go server.Serve(listener) authJSON := `{ diff --git a/go/vt/vtctl/grpcvtctldclient/client_test.go b/go/vt/vtctl/grpcvtctldclient/client_test.go index 93c95ffa607..7166bafbcff 100644 --- a/go/vt/vtctl/grpcvtctldclient/client_test.go +++ b/go/vt/vtctl/grpcvtctldclient/client_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" @@ -41,7 +42,7 @@ func TestFindAllShardsInKeyspace(t *testing.T) { ts := memorytopo.NewServer(ctx, "cell1") defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { @@ -88,7 +89,7 @@ func TestGetKeyspace(t *testing.T) { ts := memorytopo.NewServer(ctx, "cell1") defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { @@ -117,7 +118,7 @@ func TestGetKeyspaces(t *testing.T) { ts := memorytopo.NewServer(ctx, "cell1") defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return grpcvtctldserver.NewVtctldServer(ts) + return grpcvtctldserver.NewVtctldServer(ts, 
sqlparser.NewTestParser()) }) testutil.WithTestServer(t, vtctld, func(t *testing.T, client vtctldclient.VtctldClient) { diff --git a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go index f5f7847b499..c3915792e4a 100644 --- a/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go +++ b/go/vt/vtctl/grpcvtctldserver/endtoend/init_shard_primary_test.go @@ -21,8 +21,10 @@ import ( "fmt" "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -46,7 +48,7 @@ func TestInitShardPrimary(t *testing.T) { ts := memorytopo.NewServer(ctx, "cell1") tmc := tmclient.NewTabletManagerClient() defer tmc.Close() - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc, collations.MySQL8(), sqlparser.NewTestParser()) primaryDb := fakesqldb.New(t) defer primaryDb.Close() @@ -93,7 +95,7 @@ func TestInitShardPrimary(t *testing.T) { tablet.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) } - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) resp, err := vtctld.InitShardPrimary(context.Background(), &vtctldatapb.InitShardPrimaryRequest{ Keyspace: tablet1.Tablet.Keyspace, Shard: tablet1.Tablet.Shard, @@ -109,7 +111,7 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1") tmc := tmclient.NewTabletManagerClient() - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmc, collations.MySQL8(), sqlparser.NewTestParser()) primaryDb := fakesqldb.New(t) defer primaryDb.Close() @@ -148,7 +150,7 @@ func TestInitShardPrimaryNoFormerPrimary(t *testing.T) 
{ tablet.TM.QueryServiceControl.(*tabletservermock.Controller).SetQueryServiceEnabledForTests(true) } - vtctld := grpcvtctldserver.NewVtctldServer(ts) + vtctld := grpcvtctldserver.NewVtctldServer(ts, sqlparser.NewTestParser()) _, err := vtctld.InitShardPrimary(context.Background(), &vtctldatapb.InitShardPrimaryRequest{ Keyspace: tablet1.Tablet.Keyspace, Shard: tablet1.Tablet.Shard, diff --git a/go/vt/vtctl/grpcvtctldserver/server.go b/go/vt/vtctl/grpcvtctldserver/server.go index e98b714ef42..7c4d11b360b 100644 --- a/go/vt/vtctl/grpcvtctldserver/server.go +++ b/go/vt/vtctl/grpcvtctldserver/server.go @@ -92,13 +92,13 @@ type VtctldServer struct { } // NewVtctldServer returns a new VtctldServer for the given topo server. -func NewVtctldServer(ts *topo.Server) *VtctldServer { +func NewVtctldServer(ts *topo.Server, parser *sqlparser.Parser) *VtctldServer { tmc := tmclient.NewTabletManagerClient() return &VtctldServer{ ts: ts, tmc: tmc, - ws: workflow.NewServer(ts, tmc), + ws: workflow.NewServer(ts, tmc, parser), } } @@ -108,7 +108,7 @@ func NewTestVtctldServer(ts *topo.Server, tmc tmclient.TabletManagerClient) *Vtc return &VtctldServer{ ts: ts, tmc: tmc, - ws: workflow.NewServer(ts, tmc), + ws: workflow.NewServer(ts, tmc, sqlparser.NewTestParser()), } } @@ -268,7 +268,7 @@ func (s *VtctldServer) ApplySchema(ctx context.Context, req *vtctldatapb.ApplySc logstream = append(logstream, e) }) - executor := schemamanager.NewTabletExecutor(migrationContext, s.ts, s.tmc, logger, waitReplicasTimeout, req.BatchSize) + executor := schemamanager.NewTabletExecutor(migrationContext, s.ts, s.tmc, logger, waitReplicasTimeout, req.BatchSize, s.ws.SQLParser()) if err = executor.SetDDLStrategy(req.DdlStrategy); err != nil { err = vterrors.Wrapf(err, "invalid DdlStrategy: %s", req.DdlStrategy) @@ -337,7 +337,7 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV span.Annotate("sql_mode", true) var stmt sqlparser.Statement - stmt, err = 
sqlparser.Parse(req.Sql) + stmt, err = s.ws.SQLParser().Parse(req.Sql) if err != nil { err = vterrors.Wrapf(err, "Parse(%s)", req.Sql) return nil, err @@ -368,7 +368,7 @@ func (s *VtctldServer) ApplyVSchema(ctx context.Context, req *vtctldatapb.ApplyV return &vtctldatapb.ApplyVSchemaResponse{VSchema: vs}, nil } - _, err = vindexes.BuildKeyspace(vs) + _, err = vindexes.BuildKeyspace(vs, s.ws.SQLParser()) if err != nil { err = vterrors.Wrapf(err, "BuildKeyspace(%s)", req.Keyspace) return nil, err @@ -4960,8 +4960,8 @@ func (s *VtctldServer) WorkflowUpdate(ctx context.Context, req *vtctldatapb.Work } // StartServer registers a VtctldServer for RPCs on the given gRPC server. -func StartServer(s *grpc.Server, ts *topo.Server) { - vtctlservicepb.RegisterVtctldServer(s, NewVtctldServer(ts)) +func StartServer(s *grpc.Server, ts *topo.Server, parser *sqlparser.Parser) { + vtctlservicepb.RegisterVtctldServer(s, NewVtctldServer(ts, parser)) } // getTopologyCell is a helper method that returns a topology cell given its path. diff --git a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go index 3100855e370..9625d0c281b 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_slow_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_slow_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql" @@ -310,7 +312,7 @@ func TestEmergencyReparentShardSlow(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.EmergencyReparentShard(ctx, tt.req) @@ -608,7 +610,7 @@ func TestPlannedReparentShardSlow(t *testing.T) { }, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.PlannedReparentShard(ctx, tt.req) @@ -738,7 +740,7 @@ func TestSleepTablet(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) start := time.Now() diff --git a/go/vt/vtctl/grpcvtctldserver/server_test.go b/go/vt/vtctl/grpcvtctldserver/server_test.go index 124c7096bc4..fa223224ea0 100644 --- a/go/vt/vtctl/grpcvtctldserver/server_test.go +++ b/go/vt/vtctl/grpcvtctldserver/server_test.go @@ -28,6 +28,7 @@ import ( "time" _flag "vitess.io/vitess/go/internal/flag" + "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -85,7 +86,7 @@ func TestPanicHandler(t *testing.T) { }() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, nil, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.AddCellInfo(context.Background(), nil) @@ -141,7 +142,7 @@ func TestAddCellInfo(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.AddCellInfo(ctx, tt.req) if tt.shouldErr { @@ -214,7 +215,7 @@ func TestAddCellsAlias(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return 
NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.AddCellsAlias(ctx, tt.req) if tt.shouldErr { @@ -326,7 +327,7 @@ func TestApplyRoutingRules(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.ApplyRoutingRules(ctx, tt.req) if tt.shouldErr { @@ -421,7 +422,7 @@ func TestApplyVSchema(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddKeyspace(ctx, t, ts, &vtctldatapb.Keyspace{ @@ -701,7 +702,7 @@ func TestBackup(t *testing.T) { testutil.AddTablet(ctx, t, tt.ts, tt.tablet, nil) } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) client := localvtctldclient.New(vtctld) stream, err := client.Backup(ctx, tt.req) @@ -1041,7 +1042,7 @@ func TestBackupShard(t *testing.T) { }, tt.tablets..., ) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) client := localvtctldclient.New(vtctld) stream, err := client.BackupShard(ctx, tt.req) @@ -1261,7 +1262,7 @@ func TestCancelSchemaMigration(t *testing.T) { }, test.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.CancelSchemaMigration(ctx, test.req) @@ -1493,7 +1494,9 @@ func TestChangeTabletType(t *testing.T) { ts := memorytopo.NewServer(ctx, tt.cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ TopoServer: ts, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts, sqlparser.NewTestParser()) + }) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, @@ -1539,7 +1542,9 @@ func TestChangeTabletType(t *testing.T) { ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &testutil.TabletManagerClient{ TopoServer: nil, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts, sqlparser.NewTestParser()) + }) testutil.AddTablet(ctx, t, ts, &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -1760,7 +1765,7 @@ func TestCleanupSchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.CleanupSchemaMigration(ctx, test.req) @@ -1962,7 +1967,7 @@ func TestForceCutOverSchemaMigration(t *testing.T) { }, test.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.ForceCutOverSchemaMigration(ctx, test.req) @@ -2166,7 +2171,7 @@ func TestCompleteSchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.CompleteSchemaMigration(ctx, test.req) @@ -2422,7 +2427,7 @@ func TestCreateKeyspace(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) for name, ks := range tt.topo { @@ -2700,7 +2705,7 @@ func TestCreateShard(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) for _, ks := range tt.keyspaces { @@ -2755,7 +2760,7 @@ func TestDeleteCellInfo(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.DeleteCellInfo(ctx, tt.req) if tt.shouldErr { @@ -2816,7 +2821,7 @@ func TestDeleteCellsAlias(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return 
NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.DeleteCellsAlias(ctx, tt.req) if tt.shouldErr { @@ -3048,7 +3053,7 @@ func TestDeleteKeyspace(t *testing.T) { ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) defer ts.Close() vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddKeyspaces(ctx, t, ts, tt.keyspaces...) @@ -3557,12 +3562,12 @@ func TestDeleteShards(t *testing.T) { cells := []string{"zone1", "zone2", "zone3"} - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond*50) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddShards(ctx, t, ts, tt.shards...) @@ -3705,7 +3710,7 @@ func TestDeleteSrvKeyspace(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.DeleteSrvVSchema(ctx, tt.req) if tt.shouldErr { @@ -4166,7 +4171,7 @@ func TestDeleteTablets(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) // Setup tablets and shards @@ -4395,7 +4400,7 @@ func TestEmergencyReparentShard(t *testing.T) { }, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.EmergencyReparentShard(ctx, tt.req) @@ -4537,7 +4542,7 @@ func TestExecuteFetchAsApp(t *testing.T) { testutil.AddTablet(ctx, t, ts, tt.tablet, nil) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.ExecuteFetchAsApp(ctx, tt.req) if tt.shouldErr { @@ -4664,7 +4669,7 @@ func TestExecuteFetchAsDBA(t *testing.T) { testutil.AddTablet(ctx, t, ts, tt.tablet, nil) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.ExecuteFetchAsDBA(ctx, tt.req) if tt.shouldErr { @@ -4849,7 +4854,7 @@ func TestExecuteHook(t *testing.T) { t.Run(tt.name, func(t *testing.T) { testutil.AddTablets(ctx, t, tt.ts, nil, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.ExecuteHook(ctx, tt.req) @@ -4870,7 +4875,7 @@ func TestFindAllShardsInKeyspace(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) ks := &vtctldatapb.Keyspace{ @@ -4912,7 +4917,7 @@ func TestGetBackups(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.BackupStorage.Backups = map[string][]string{ @@ -5020,7 +5025,7 @@ func TestGetKeyspace(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) expected := &vtctldatapb.GetKeyspaceResponse{ @@ -5046,7 +5051,7 @@ func TestGetCellInfoNames(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2", "cell3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.GetCellInfoNames(ctx, &vtctldatapb.GetCellInfoNamesRequest{}) @@ -5055,7 +5060,7 @@ func TestGetCellInfoNames(t *testing.T) { ts = memorytopo.NewServer(ctx) vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) 
vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err = vtctld.GetCellInfoNames(ctx, &vtctldatapb.GetCellInfoNamesRequest{}) @@ -5064,7 +5069,7 @@ func TestGetCellInfoNames(t *testing.T) { ts, topofactory := memorytopo.NewServerAndFactory(ctx, "cell1") vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) topofactory.SetError(assert.AnError) @@ -5079,7 +5084,7 @@ func TestGetCellInfo(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) expected := &topodatapb.CellInfo{ @@ -5107,7 +5112,7 @@ func TestGetCellsAliases(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "c11", "c12", "c13", "c21", "c22") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) alias1 := &topodatapb.CellsAlias{ @@ -5134,7 +5139,7 @@ func TestGetCellsAliases(t *testing.T) { ts, topofactory := memorytopo.NewServerAndFactory(ctx) vtctld = testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) topofactory.SetError(assert.AnError) @@ -5214,7 +5219,9 @@ func TestGetFullStatus(t *testing.T) { FullStatusResult: &replicationdatapb.FullStatus{ ServerUuid: tt.serverUUID, }, - }, func(ts *topo.Server) vtctlservicepb.VtctldServer { return NewVtctldServer(ts) }) + }, func(ts *topo.Server) vtctlservicepb.VtctldServer { + return NewVtctldServer(ts, 
sqlparser.NewTestParser()) + }) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ AlsoSetShardPrimary: true, @@ -5239,7 +5246,7 @@ func TestGetKeyspaces(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.GetKeyspaces(ctx, &vtctldatapb.GetKeyspacesRequest{}) @@ -5407,7 +5414,7 @@ func TestGetPermissions(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.GetPermissions(ctx, tt.req) if tt.shouldErr { @@ -5483,7 +5490,7 @@ func TestGetRoutingRules(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.GetRoutingRules(ctx, &vtctldatapb.GetRoutingRulesRequest{}) if tt.shouldErr { @@ -5508,7 +5515,7 @@ func TestGetSchema(t *testing.T) { }{}, } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) validAlias := &topodatapb.TabletAlias{ @@ -5873,7 +5880,7 @@ func TestGetSchemaMigrations(t *testing.T) { ts, factory := memorytopo.NewServerAndFactory(ctx, cells...) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{AlsoSetShardPrimary: true}, test.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) if test.failTopo { @@ -5964,7 +5971,7 @@ func TestGetShard(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddShards(ctx, t, ts, tt.topo...) @@ -6101,7 +6108,7 @@ func TestGetSrvKeyspaceNames(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.GetSrvKeyspaceNames(ctx, tt.req) if tt.shouldErr { @@ -6258,7 +6265,7 @@ func TestGetSrvKeyspaces(t *testing.T) { testutil.AddSrvKeyspaces(t, ts, tt.srvKeyspaces...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) if tt.topoErr != nil { @@ -6285,7 +6292,7 @@ func TestGetSrvVSchema(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1", "zone2") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) zone1SrvVSchema := &vschemapb.SrvVSchema{ @@ -6496,7 +6503,7 @@ func TestGetSrvVSchemas(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, "zone1", "zone2", "zone3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) zone1SrvVSchema := &vschemapb.SrvVSchema{ @@ -6556,7 +6563,7 @@ func TestGetTablet(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) tablet := &topodatapb.Tablet{ @@ -7179,7 +7186,7 @@ func TestGetTablets(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, tt.cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) 
@@ -7203,7 +7210,7 @@ func TestGetTopologyPath(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2", "cell3") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) err := ts.CreateKeyspace(ctx, "keyspace1", &topodatapb.Keyspace{}) @@ -7292,7 +7299,7 @@ func TestGetVSchema(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) t.Run("found", func(t *testing.T) { @@ -7523,7 +7530,7 @@ func TestLaunchSchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.LaunchSchemaMigration(ctx, test.req) @@ -7610,7 +7617,7 @@ func TestPingTablet(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.PingTablet(ctx, tt.req) @@ -7858,7 +7865,7 @@ func TestPlannedReparentShard(t *testing.T) { }, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.PlannedReparentShard(ctx, tt.req) @@ -7901,7 +7908,7 @@ func TestRebuildKeyspaceGraph(t *testing.T) { Name: "testkeyspace", }) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.RebuildKeyspaceGraph(ctx, &vtctldatapb.RebuildKeyspaceGraphRequest{ @@ -7918,7 +7925,7 @@ func TestRebuildKeyspaceGraph(t *testing.T) { ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.RebuildKeyspaceGraph(context.Background(), &vtctldatapb.RebuildKeyspaceGraphRequest{ @@ -7938,7 +7945,7 @@ func TestRebuildKeyspaceGraph(t *testing.T) { Name: "testkeyspace", }) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) factory.SetError(assert.AnError) @@ -7959,7 +7966,7 @@ func TestRebuildKeyspaceGraph(t *testing.T) { Name: "testkeyspace", }) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) lctx, unlock, lerr := ts.LockKeyspace(context.Background(), "testkeyspace", "test lock") @@ -8008,7 +8015,7 @@ func TestRebuildVSchemaGraph(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - 
return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.RebuildVSchemaGraph(ctx, req) if tt.shouldErr { @@ -8107,7 +8114,7 @@ func TestRefreshState(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.RefreshState(ctx, tt.req) if tt.shouldErr { @@ -8292,7 +8299,7 @@ func TestRefreshStateByShard(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.RefreshStateByShard(ctx, tt.req) if tt.shouldErr { @@ -8396,7 +8403,7 @@ func TestReloadSchema(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.ReloadSchema(ctx, tt.req) if tt.shouldErr { @@ -8494,7 +8501,7 @@ func TestReloadSchemaKeyspace(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.ReloadSchemaKeyspace(ctx, tt.req) if tt.shouldErr { @@ -8652,7 +8659,7 @@ func TestReloadSchemaShard(t *testing.T) { }, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.ReloadSchemaShard(ctx, tt.req) if tt.shouldErr { @@ -8671,7 +8678,7 @@ func TestRemoveBackup(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) setup := func() { @@ -8862,7 +8869,7 @@ func TestRemoveKeyspaceCell(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) // Setup topo @@ -9151,7 +9158,7 @@ func TestRemoveShardCell(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) // Setup shard topos and replication graphs. @@ -9761,7 +9768,7 @@ func TestReparentTablet(t *testing.T) { defer cancel() ts, topofactory := memorytopo.NewServerAndFactory(ctx, cells...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddTablets(ctx, t, ts, &testutil.AddTabletOptions{ @@ -9894,7 +9901,7 @@ func TestRestoreFromBackup(t *testing.T) { }, tt.tablets..., ) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) client := localvtctldclient.New(vtctld) stream, err := client.RestoreFromBackup(ctx, tt.req) @@ -10112,7 +10119,7 @@ func TestRetrySchemaMigration(t *testing.T) { }, test.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, test.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.RetrySchemaMigration(ctx, test.req) @@ -10219,7 +10226,7 @@ func TestRunHealthCheck(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.RunHealthCheck(ctx, tt.req) if tt.shouldErr { @@ -10299,7 +10306,7 @@ func TestSetKeyspaceDurabilityPolicy(t *testing.T) { testutil.AddKeyspaces(ctx, t, ts, tt.keyspaces...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.SetKeyspaceDurabilityPolicy(ctx, tt.req) if tt.expectedErr != "" { @@ -10358,7 +10365,7 @@ func TestSetShardIsPrimaryServing(t *testing.T) { name: "lock error", setup: func(t *testing.T, tt *testcase) context.Context { var cancel func() - tt.ctx, cancel = context.WithTimeout(ctx, time.Millisecond*50) + tt.ctx, cancel = context.WithCancel(ctx) tt.ts = memorytopo.NewServer(ctx, "zone1") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", @@ -10396,7 +10403,7 @@ func TestSetShardIsPrimaryServing(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.SetShardIsPrimaryServing(tt.ctx, tt.req) if tt.shouldErr { @@ -10610,7 +10617,7 @@ func TestSetShardTabletControl(t *testing.T) { name: "keyspace lock error", setup: func(t *testing.T, tt *testcase) { var cancel func() - tt.ctx, cancel = context.WithTimeout(ctx, time.Millisecond*50) + tt.ctx, cancel = context.WithCancel(ctx) tt.ts = memorytopo.NewServer(ctx, "zone1") testutil.AddShards(tt.ctx, t, tt.ts, &vtctldatapb.Shard{ Keyspace: "testkeyspace", @@ -10646,7 +10653,7 @@ func TestSetShardTabletControl(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.SetShardTabletControl(tt.ctx, tt.req) if tt.shouldErr { @@ -10850,7 +10857,7 @@ func TestSetWritable(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.SetWritable(ctx, tt.req) @@ -10871,7 +10878,7 @@ func TestShardReplicationAdd(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) tablets := []*topodatapb.Tablet{ @@ -11166,7 +11173,7 @@ func TestShardReplicationPositions(t *testing.T) { }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) requestCtx := ctx @@ -11197,7 +11204,7 @@ func TestShardReplicationRemove(t *testing.T) { ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) tablets := []*topodatapb.Tablet{ @@ -11357,7 +11364,7 @@ func TestSourceShardAdd(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddShards(ctx, t, ts, tt.shards...) 
@@ -11492,7 +11499,7 @@ func TestSourceShardDelete(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) testutil.AddShards(ctx, t, ts, tt.shards...) @@ -11684,7 +11691,7 @@ func TestStartReplication(t *testing.T) { AlsoSetShardPrimary: true, }, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.StartReplication(ctx, tt.req) @@ -11821,7 +11828,7 @@ func TestStopReplication(t *testing.T) { testutil.AddTablets(ctx, t, ts, nil, tt.tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) _, err := vtctld.StopReplication(ctx, tt.req) @@ -12208,7 +12215,7 @@ func TestTabletExternallyReparented(t *testing.T) { TopoServer: ts, } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) if tt.tmcHasNoTopo { @@ -12393,7 +12400,7 @@ func TestUpdateCellInfo(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.UpdateCellInfo(ctx, tt.req) if tt.shouldErr { @@ -12543,7 +12550,7 @@ func TestUpdateCellsAlias(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return 
NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.UpdateCellsAlias(ctx, tt.req) if tt.shouldErr { @@ -12651,7 +12658,7 @@ func TestValidate(t *testing.T) { SkipShardCreation: false, }, tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, nil, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.Validate(ctx, &vtctldatapb.ValidateRequest{ @@ -12768,7 +12775,7 @@ func TestValidateSchemaKeyspace(t *testing.T) { }, tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) schema1 := &tabletmanagerdatapb.SchemaDefinition{ @@ -12954,7 +12961,7 @@ func TestValidateVersionKeyspace(t *testing.T) { }, tablets...) vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) tests := []*struct { @@ -13069,7 +13076,7 @@ func TestValidateVersionShard(t *testing.T) { }, tablets...) 
vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, ts, &tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) tests := []*struct { @@ -13661,7 +13668,7 @@ func TestValidateShard(t *testing.T) { } vtctld := testutil.NewVtctldServerWithTabletManagerClient(t, tt.ts, tt.tmc, func(ts *topo.Server) vtctlservicepb.VtctldServer { - return NewVtctldServer(ts) + return NewVtctldServer(ts, sqlparser.NewTestParser()) }) resp, err := vtctld.ValidateShard(ctx, tt.req) if tt.shouldErr { diff --git a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go index 20c51968a11..736bda4a1f4 100644 --- a/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go +++ b/go/vt/vtctl/grpcvtctldserver/testutil/test_tmclient.go @@ -1374,7 +1374,7 @@ func (fake *TabletManagerClient) VReplicationExec(ctx context.Context, tablet *t if resultsForTablet, ok := fake.VReplicationExecResults[key]; ok { // Round trip the expected query both to ensure it's valid and to // standardize on capitalization and formatting. 
- stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { return nil, err } diff --git a/go/vt/vtctl/grpcvtctlserver/server.go b/go/vt/vtctl/grpcvtctlserver/server.go index afd7b9df1c9..29fc4be0651 100644 --- a/go/vt/vtctl/grpcvtctlserver/server.go +++ b/go/vt/vtctl/grpcvtctlserver/server.go @@ -25,6 +25,10 @@ import ( "google.golang.org/grpc" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/topo" @@ -40,12 +44,14 @@ import ( // VtctlServer is our RPC server type VtctlServer struct { vtctlservicepb.UnimplementedVtctlServer - ts *topo.Server + ts *topo.Server + collationEnv *collations.Environment + parser *sqlparser.Parser } // NewVtctlServer returns a new Vtctl Server for the topo server. -func NewVtctlServer(ts *topo.Server) *VtctlServer { - return &VtctlServer{ts: ts} +func NewVtctlServer(ts *topo.Server, collationEnv *collations.Environment, parser *sqlparser.Parser) *VtctlServer { + return &VtctlServer{ts: ts, collationEnv: collationEnv, parser: parser} } // ExecuteVtctlCommand is part of the vtctldatapb.VtctlServer interface @@ -72,13 +78,13 @@ func (s *VtctlServer) ExecuteVtctlCommand(args *vtctldatapb.ExecuteVtctlCommandR // create the wrangler tmc := tmclient.NewTabletManagerClient() defer tmc.Close() - wr := wrangler.New(logger, s.ts, tmc) + wr := wrangler.New(logger, s.ts, tmc, s.collationEnv, s.parser) // execute the command return vtctl.RunCommand(stream.Context(), wr, args.Args) } // StartServer registers the VtctlServer for RPCs -func StartServer(s *grpc.Server, ts *topo.Server) { - vtctlservicepb.RegisterVtctlServer(s, NewVtctlServer(ts)) +func StartServer(s *grpc.Server, ts *topo.Server, collationEnv *collations.Environment, parser *sqlparser.Parser) { + vtctlservicepb.RegisterVtctlServer(s, NewVtctlServer(ts, collationEnv, parser)) } diff --git a/go/vt/vtctl/vtctl.go 
b/go/vt/vtctl/vtctl.go index 96d8bf9e9be..9de6add6f28 100644 --- a/go/vt/vtctl/vtctl.go +++ b/go/vt/vtctl/vtctl.go @@ -2921,7 +2921,7 @@ func commandApplySchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *pf *migrationContext = *requestContext } - parts, err := sqlparser.SplitStatementToPieces(change) + parts, err := wr.SQLParser().SplitStatementToPieces(change) if err != nil { return err } @@ -3341,7 +3341,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p *sql = string(sqlBytes) } - stmt, err := sqlparser.Parse(*sql) + stmt, err := wr.SQLParser().Parse(*sql) if err != nil { return fmt.Errorf("error parsing vschema statement `%s`: %v", *sql, err) } @@ -3392,7 +3392,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p } // Validate the VSchema. - ksVs, err := vindexes.BuildKeyspace(vs) + ksVs, err := vindexes.BuildKeyspace(vs, wr.SQLParser()) if err != nil { return err } @@ -3424,7 +3424,7 @@ func commandApplyVSchema(ctx context.Context, wr *wrangler.Wrangler, subFlags *p return err } - if _, err := vindexes.BuildKeyspace(vs); err != nil { + if _, err := vindexes.BuildKeyspace(vs, wr.SQLParser()); err != nil { return err } diff --git a/go/vt/vtctl/vtctlclient/interface.go b/go/vt/vtctl/vtctlclient/interface.go index e9bf0cdc257..b750cdf8db6 100644 --- a/go/vt/vtctl/vtctlclient/interface.go +++ b/go/vt/vtctl/vtctlclient/interface.go @@ -68,17 +68,6 @@ func RegisterFactory(name string, factory Factory) { factories[name] = factory } -// UnregisterFactoryForTest allows to unregister a client implementation from the static map. -// This function is used by unit tests to cleanly unregister any fake implementations. -// This way, a test package can use the same name for different fakes and no dangling fakes are -// left behind in the static factories map after the test. 
-func UnregisterFactoryForTest(name string) { - if _, ok := factories[name]; !ok { - log.Fatalf("UnregisterFactoryForTest: %s is not registered", name) - } - delete(factories, name) -} - // New allows a user of the client library to get its implementation. func New(addr string) (VtctlClient, error) { factory, ok := factories[vtctlClientProtocol] diff --git a/go/vt/vtctl/workflow/materializer.go b/go/vt/vtctl/workflow/materializer.go index 52196661eb5..eb9e7c25f32 100644 --- a/go/vt/vtctl/workflow/materializer.go +++ b/go/vt/vtctl/workflow/materializer.go @@ -63,6 +63,8 @@ type materializer struct { isPartial bool primaryVindexesDiffer bool workflowType binlogdatapb.VReplicationWorkflowType + + parser *sqlparser.Parser } func (mz *materializer) getWorkflowSubType() (binlogdatapb.VReplicationWorkflowSubType, error) { @@ -197,7 +199,7 @@ func (mz *materializer) generateInserts(ctx context.Context, sourceShards []*top } // Validate non-empty query. - stmt, err := sqlparser.Parse(ts.SourceExpression) + stmt, err := mz.parser.Parse(ts.SourceExpression) if err != nil { return "", err } @@ -296,7 +298,7 @@ func (mz *materializer) generateBinlogSources(ctx context.Context, targetShard * } // Validate non-empty query. - stmt, err := sqlparser.Parse(ts.SourceExpression) + stmt, err := mz.parser.Parse(ts.SourceExpression) if err != nil { return nil, err } @@ -406,7 +408,7 @@ func (mz *materializer) deploySchema() error { if createDDL == createDDLAsCopy || createDDL == createDDLAsCopyDropConstraint || createDDL == createDDLAsCopyDropForeignKeys { if ts.SourceExpression != "" { // Check for table if non-empty SourceExpression. 
- sourceTableName, err := sqlparser.TableFromStatement(ts.SourceExpression) + sourceTableName, err := mz.parser.TableFromStatement(ts.SourceExpression) if err != nil { return err } @@ -422,7 +424,7 @@ func (mz *materializer) deploySchema() error { } if createDDL == createDDLAsCopyDropConstraint { - strippedDDL, err := stripTableConstraints(ddl) + strippedDDL, err := stripTableConstraints(ddl, mz.parser) if err != nil { return err } @@ -431,7 +433,7 @@ func (mz *materializer) deploySchema() error { } if createDDL == createDDLAsCopyDropForeignKeys { - strippedDDL, err := stripTableForeignKeys(ddl) + strippedDDL, err := stripTableForeignKeys(ddl, mz.parser) if err != nil { return err } @@ -452,7 +454,7 @@ func (mz *materializer) deploySchema() error { // We use schemadiff to normalize the schema. // For now, and because this is could have wider implications, we ignore any errors in // reading the source schema. - schema, err := schemadiff.NewSchemaFromQueries(applyDDLs) + schema, err := schemadiff.NewSchemaFromQueries(applyDDLs, mz.parser) if err != nil { log.Error(vterrors.Wrapf(err, "AtomicCopy: failed to normalize schema via schemadiff")) } else { @@ -484,7 +486,7 @@ func (mz *materializer) buildMaterializer() error { if err != nil { return err } - targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace) + targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace, mz.parser) if err != nil { return err } @@ -598,22 +600,6 @@ func (mz *materializer) startStreams(ctx context.Context) error { }) } -func Materialize(ctx context.Context, ts *topo.Server, tmc tmclient.TabletManagerClient, ms *vtctldatapb.MaterializeSettings) error { - mz := &materializer{ - ctx: ctx, - ts: ts, - sourceTs: ts, - tmc: tmc, - ms: ms, - } - - err := mz.createMaterializerStreams() - if err != nil { - return err - } - return mz.startStreams(ctx) -} - func (mz *materializer) forAllTargets(f func(*topo.ShardInfo) error) error { var wg sync.WaitGroup 
allErrors := &concurrency.AllErrorRecorder{} diff --git a/go/vt/vtctl/workflow/materializer_env_test.go b/go/vt/vtctl/workflow/materializer_env_test.go index 1026628405e..14ea59f690e 100644 --- a/go/vt/vtctl/workflow/materializer_env_test.go +++ b/go/vt/vtctl/workflow/materializer_env_test.go @@ -82,7 +82,8 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M cell: "cell", tmc: newTestMaterializerTMClient(), } - env.ws = NewServer(env.topoServ, env.tmc) + parser := sqlparser.NewTestParser() + env.ws = NewServer(env.topoServ, env.tmc, parser) tabletID := 100 for _, shard := range sources { _ = env.addTablet(tabletID, env.ms.SourceKeyspace, shard, topodatapb.TabletType_PRIMARY) @@ -98,7 +99,7 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M for _, ts := range ms.TableSettings { tableName := ts.TargetTable - table, err := sqlparser.TableFromStatement(ts.SourceExpression) + table, err := parser.TableFromStatement(ts.SourceExpression) if err == nil { tableName = table.Name.String() } diff --git a/go/vt/vtctl/workflow/materializer_test.go b/go/vt/vtctl/workflow/materializer_test.go index fc39bb4d30b..f9c1536ddbf 100644 --- a/go/vt/vtctl/workflow/materializer_test.go +++ b/go/vt/vtctl/workflow/materializer_test.go @@ -28,6 +28,8 @@ import ( "golang.org/x/exp/maps" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -134,7 +136,7 @@ func TestStripForeignKeys(t *testing.T) { } for _, tc := range tcs { - newDDL, err := stripTableForeignKeys(tc.ddl) + newDDL, err := stripTableForeignKeys(tc.ddl, sqlparser.NewTestParser()) if tc.hasErr != (err != nil) { t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) } @@ -208,7 +210,7 @@ func TestStripConstraints(t *testing.T) { } for _, tc := range tcs { - newDDL, err := stripTableConstraints(tc.ddl) + newDDL, err := 
stripTableConstraints(tc.ddl, sqlparser.NewTestParser()) if tc.hasErr != (err != nil) { t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) } @@ -3013,7 +3015,7 @@ func TestMaterializerNoSourcePrimary(t *testing.T) { cell: "cell", tmc: newTestMaterializerTMClient(), } - env.ws = NewServer(env.topoServ, env.tmc) + env.ws = NewServer(env.topoServ, env.tmc, sqlparser.NewTestParser()) defer env.close() tabletID := 100 diff --git a/go/vt/vtctl/workflow/server.go b/go/vt/vtctl/workflow/server.go index bcc138932e2..05b07681c8b 100644 --- a/go/vt/vtctl/workflow/server.go +++ b/go/vt/vtctl/workflow/server.go @@ -142,18 +142,24 @@ type Server struct { ts *topo.Server tmc tmclient.TabletManagerClient // Limit the number of concurrent background goroutines if needed. - sem *semaphore.Weighted + sem *semaphore.Weighted + parser *sqlparser.Parser } // NewServer returns a new server instance with the given topo.Server and // TabletManagerClient. -func NewServer(ts *topo.Server, tmc tmclient.TabletManagerClient) *Server { +func NewServer(ts *topo.Server, tmc tmclient.TabletManagerClient, parser *sqlparser.Parser) *Server { return &Server{ - ts: ts, - tmc: tmc, + ts: ts, + tmc: tmc, + parser: parser, } } +func (s *Server) SQLParser() *sqlparser.Parser { + return s.parser +} + // CheckReshardingJournalExistsOnTablet returns the journal (or an empty // journal) and a boolean to indicate if the resharding_journal table exists on // the given tablet. 
@@ -407,7 +413,7 @@ func (s *Server) GetWorkflows(ctx context.Context, req *vtctldatapb.GetWorkflows where, ) - vx := vexec.NewVExec(req.Keyspace, "", s.ts, s.tmc) + vx := vexec.NewVExec(req.Keyspace, "", s.ts, s.tmc, s.SQLParser()) results, err := vx.QueryContext(ctx, query) if err != nil { return nil, err @@ -1243,6 +1249,7 @@ func (s *Server) Materialize(ctx context.Context, ms *vtctldatapb.MaterializeSet sourceTs: s.ts, tmc: s.tmc, ms: ms, + parser: s.SQLParser(), } err := mz.createMaterializerStreams() @@ -1382,6 +1389,7 @@ func (s *Server) moveTablesCreate(ctx context.Context, req *vtctldatapb.MoveTabl tmc: s.tmc, ms: ms, workflowType: workflowType, + parser: s.SQLParser(), } err = mz.createMoveTablesStreams(req) if err != nil { @@ -1838,7 +1846,7 @@ func (s *Server) WorkflowDelete(ctx context.Context, req *vtctldatapb.WorkflowDe deleteReq := &tabletmanagerdatapb.DeleteVReplicationWorkflowRequest{ Workflow: req.Workflow, } - vx := vexec.NewVExec(req.Keyspace, req.Workflow, s.ts, s.tmc) + vx := vexec.NewVExec(req.Keyspace, req.Workflow, s.ts, s.tmc, s.SQLParser()) callback := func(ctx context.Context, tablet *topo.TabletInfo) (*querypb.QueryResult, error) { res, err := s.tmc.DeleteVReplicationWorkflow(ctx, tablet.Tablet, deleteReq) if err != nil { @@ -2114,7 +2122,7 @@ func (s *Server) WorkflowUpdate(ctx context.Context, req *vtctldatapb.WorkflowUp span.Annotate("on_ddl", req.TabletRequest.OnDdl) span.Annotate("state", req.TabletRequest.State) - vx := vexec.NewVExec(req.Keyspace, req.TabletRequest.Workflow, s.ts, s.tmc) + vx := vexec.NewVExec(req.Keyspace, req.TabletRequest.Workflow, s.ts, s.tmc, s.SQLParser()) callback := func(ctx context.Context, tablet *topo.TabletInfo) (*querypb.QueryResult, error) { res, err := s.tmc.UpdateVReplicationWorkflow(ctx, tablet.Tablet, req.TabletRequest) if err != nil { @@ -2527,7 +2535,7 @@ func (s *Server) buildTrafficSwitcher(ctx context.Context, targetKeyspace, workf if err != nil { return nil, err } - ts.sourceKSSchema, 
err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace) + ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace, s.SQLParser()) if err != nil { return nil, err } @@ -3116,7 +3124,7 @@ func (s *Server) switchWrites(ctx context.Context, req *vtctldatapb.WorkflowSwit } if !journalsExist { ts.Logger().Infof("No previous journals were found. Proceeding normally.") - sm, err := BuildStreamMigrator(ctx, ts, cancel) + sm, err := BuildStreamMigrator(ctx, ts, cancel, s.parser) if err != nil { return handleError("failed to migrate the workflow streams", err) } @@ -3446,7 +3454,7 @@ func (s *Server) prepareCreateLookup(ctx context.Context, workflow, keyspace str if !strings.Contains(vindex.Type, "lookup") { return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex %s is not a lookup type", vindex.Type) } - targetKeyspace, targetTableName, err = sqlparser.ParseTable(vindex.Params["table"]) + targetKeyspace, targetTableName, err = s.parser.ParseTable(vindex.Params["table"]) if err != nil || targetKeyspace == "" { return nil, nil, nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "vindex table name (%s) must be in the form .", vindex.Params["table"]) } diff --git a/go/vt/vtctl/workflow/server_test.go b/go/vt/vtctl/workflow/server_test.go index 85c60336351..e3b33e19dc9 100644 --- a/go/vt/vtctl/workflow/server_test.go +++ b/go/vt/vtctl/workflow/server_test.go @@ -25,6 +25,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/encoding/prototext" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/topo/topoproto" @@ -142,7 +144,7 @@ func TestCheckReshardingJournalExistsOnTablet(t *testing.T) { }, } - ws := NewServer(nil, tmc) + ws := NewServer(nil, tmc, sqlparser.NewTestParser()) journal, exists, err := ws.CheckReshardingJournalExistsOnTablet(ctx, tt.tablet, 1) if tt.shouldErr { assert.Error(t, err) diff --git 
a/go/vt/vtctl/workflow/stream_migrator.go b/go/vt/vtctl/workflow/stream_migrator.go index 75d509614b7..23d382d8062 100644 --- a/go/vt/vtctl/workflow/stream_migrator.go +++ b/go/vt/vtctl/workflow/stream_migrator.go @@ -61,14 +61,16 @@ type StreamMigrator struct { templates []*VReplicationStream ts ITrafficSwitcher logger logutil.Logger + parser *sqlparser.Parser } // BuildStreamMigrator creates a new StreamMigrator based on the given // TrafficSwitcher. -func BuildStreamMigrator(ctx context.Context, ts ITrafficSwitcher, cancelMigrate bool) (*StreamMigrator, error) { +func BuildStreamMigrator(ctx context.Context, ts ITrafficSwitcher, cancelMigrate bool, parser *sqlparser.Parser) (*StreamMigrator, error) { sm := &StreamMigrator{ ts: ts, logger: ts.Logger(), + parser: parser, } if sm.ts.MigrationType() == binlogdatapb.MigrationType_TABLES { @@ -674,7 +676,7 @@ func (sm *StreamMigrator) templatizeRule(ctx context.Context, rule *binlogdatapb } func (sm *StreamMigrator) templatizeKeyRange(ctx context.Context, rule *binlogdatapb.Rule) error { - statement, err := sqlparser.Parse(rule.Filter) + statement, err := sm.parser.Parse(rule.Filter) if err != nil { return err } diff --git a/go/vt/vtctl/workflow/stream_migrator_test.go b/go/vt/vtctl/workflow/stream_migrator_test.go index 04f787eb4d4..38ae10280f7 100644 --- a/go/vt/vtctl/workflow/stream_migrator_test.go +++ b/go/vt/vtctl/workflow/stream_migrator_test.go @@ -24,6 +24,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" @@ -304,7 +306,7 @@ func TestTemplatize(t *testing.T) { }, }, } - ksschema, err := vindexes.BuildKeyspaceSchema(vs, "ks") + ksschema, err := vindexes.BuildKeyspaceSchema(vs, "ks", sqlparser.NewTestParser()) require.NoError(t, err, "could not create test keyspace %+v", vs) ts := &testTrafficSwitcher{ diff --git 
a/go/vt/vtctl/workflow/traffic_switcher.go b/go/vt/vtctl/workflow/traffic_switcher.go index 9999d46cdcc..104d024ebd5 100644 --- a/go/vt/vtctl/workflow/traffic_switcher.go +++ b/go/vt/vtctl/workflow/traffic_switcher.go @@ -609,7 +609,8 @@ func (ts *trafficSwitcher) switchTableReads(ctx context.Context, cells []string, func (ts *trafficSwitcher) startReverseVReplication(ctx context.Context) error { return ts.ForAllSources(func(source *MigrationSource) error { - query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s", encodeString(source.GetPrimary().DbName())) + query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s and workflow=%s", + encodeString(source.GetPrimary().DbName()), encodeString(ts.ReverseWorkflowName())) _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, query) return err }) diff --git a/go/vt/vtctl/workflow/utils.go b/go/vt/vtctl/workflow/utils.go index 4d1a3c5df4d..f56b721a415 100644 --- a/go/vt/vtctl/workflow/utils.go +++ b/go/vt/vtctl/workflow/utils.go @@ -167,8 +167,8 @@ func createDefaultShardRoutingRules(ctx context.Context, ms *vtctldatapb.Materia return nil } -func stripTableConstraints(ddl string) (string, error) { - ast, err := sqlparser.ParseStrictDDL(ddl) +func stripTableConstraints(ddl string, parser *sqlparser.Parser) (string, error) { + ast, err := parser.ParseStrictDDL(ddl) if err != nil { return "", err } @@ -189,8 +189,8 @@ func stripTableConstraints(ddl string) (string, error) { return newDDL, nil } -func stripTableForeignKeys(ddl string) (string, error) { - ast, err := sqlparser.ParseStrictDDL(ddl) +func stripTableForeignKeys(ddl string, parser *sqlparser.Parser) (string, error) { + ast, err := parser.ParseStrictDDL(ddl) if err != nil { return "", err } diff --git a/go/vt/vtctl/workflow/vexec/query_planner_test.go b/go/vt/vtctl/workflow/vexec/query_planner_test.go index 9199c8a0947..0baa1718b14 100644 --- 
a/go/vt/vtctl/workflow/vexec/query_planner_test.go +++ b/go/vt/vtctl/workflow/vexec/query_planner_test.go @@ -357,7 +357,7 @@ func TestVReplicationLogQueryPlanner(t *testing.T) { t.Parallel() planner := NewVReplicationLogQueryPlanner(nil, tt.targetStreamIDs) - stmt, err := sqlparser.Parse(tt.query) + stmt, err := sqlparser.NewTestParser().Parse(tt.query) require.NoError(t, err, "could not parse query %q", tt.query) qp, err := planner.planSelect(stmt.(*sqlparser.Select)) if tt.shouldErr { diff --git a/go/vt/vtctl/workflow/vexec/testutil/query.go b/go/vt/vtctl/workflow/vexec/testutil/query.go index 3988f7a112f..1add74e5b02 100644 --- a/go/vt/vtctl/workflow/vexec/testutil/query.go +++ b/go/vt/vtctl/workflow/vexec/testutil/query.go @@ -41,7 +41,7 @@ func ParsedQueryFromString(t *testing.T, query string) *sqlparser.ParsedQuery { func StatementFromString(t *testing.T, query string) sqlparser.Statement { t.Helper() - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err, "could not parse query %v", query) return stmt diff --git a/go/vt/vtctl/workflow/vexec/vexec.go b/go/vt/vtctl/workflow/vexec/vexec.go index 477b81a1a03..54591b2c306 100644 --- a/go/vt/vtctl/workflow/vexec/vexec.go +++ b/go/vt/vtctl/workflow/vexec/vexec.go @@ -95,6 +95,8 @@ type VExec struct { // to support running in modes like: // - Execute serially rather than concurrently. // - Only return error if greater than some percentage of the targets fail. + + parser *sqlparser.Parser } // NewVExec returns a new instance suitable for making vexec queries to a given @@ -102,12 +104,13 @@ type VExec struct { // string). The provided topo server is used to look up target tablets for // queries. A given instance will discover targets exactly once for its // lifetime, so to force a refresh, create another instance. 
-func NewVExec(keyspace string, workflow string, ts *topo.Server, tmc tmclient.TabletManagerClient) *VExec { +func NewVExec(keyspace string, workflow string, ts *topo.Server, tmc tmclient.TabletManagerClient, parser *sqlparser.Parser) *VExec { return &VExec{ ts: ts, tmc: tmc, keyspace: keyspace, workflow: workflow, + parser: parser, } } @@ -127,7 +130,7 @@ func (vx *VExec) QueryContext(ctx context.Context, query string) (map[*topo.Tabl } } - stmt, err := sqlparser.Parse(query) + stmt, err := vx.parser.Parse(query) if err != nil { return nil, err } @@ -299,6 +302,7 @@ func (vx *VExec) WithWorkflow(workflow string) *VExec { ts: vx.ts, tmc: vx.tmc, primaries: vx.primaries, + parser: vx.parser, workflow: workflow, } } diff --git a/go/vt/vtctld/action_repository.go b/go/vt/vtctld/action_repository.go index 0076ee65ba6..095beb2ae90 100644 --- a/go/vt/vtctld/action_repository.go +++ b/go/vt/vtctld/action_repository.go @@ -23,6 +23,10 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" @@ -83,16 +87,20 @@ type ActionRepository struct { shardActions map[string]actionShardMethod tabletActions map[string]actionTabletRecord ts *topo.Server + collationEnv *collations.Environment + parser *sqlparser.Parser } // NewActionRepository creates and returns a new ActionRepository, // with no actions. 
-func NewActionRepository(ts *topo.Server) *ActionRepository { +func NewActionRepository(ts *topo.Server, collationEnv *collations.Environment, parser *sqlparser.Parser) *ActionRepository { return &ActionRepository{ keyspaceActions: make(map[string]actionKeyspaceMethod), shardActions: make(map[string]actionShardMethod), tabletActions: make(map[string]actionTabletRecord), ts: ts, + collationEnv: collationEnv, + parser: parser, } } @@ -125,7 +133,7 @@ func (ar *ActionRepository) ApplyKeyspaceAction(ctx context.Context, actionName, } ctx, cancel := context.WithTimeout(ctx, actionTimeout) - wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient(), ar.collationEnv, ar.parser) output, err := action(ctx, wr, keyspace) cancel() if err != nil { @@ -152,7 +160,7 @@ func (ar *ActionRepository) ApplyShardAction(ctx context.Context, actionName, ke } ctx, cancel := context.WithTimeout(ctx, actionTimeout) - wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient(), ar.collationEnv, ar.parser) output, err := action(ctx, wr, keyspace, shard) cancel() if err != nil { @@ -186,7 +194,7 @@ func (ar *ActionRepository) ApplyTabletAction(ctx context.Context, actionName st // run the action ctx, cancel := context.WithTimeout(ctx, actionTimeout) - wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ar.ts, tmclient.NewTabletManagerClient(), ar.collationEnv, ar.parser) output, err := action.method(ctx, wr, tabletAlias) cancel() if err != nil { diff --git a/go/vt/vtctld/api.go b/go/vt/vtctld/api.go index 43afcb29452..92778ce83af 100644 --- a/go/vt/vtctld/api.go +++ b/go/vt/vtctld/api.go @@ -487,7 +487,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions 
*ActionRepository) { logstream := logutil.NewMemoryLogger() - wr := wrangler.New(logstream, ts, tmClient) + wr := wrangler.New(logstream, ts, tmClient, actions.collationEnv, actions.parser) err := vtctl.RunCommand(r.Context(), wr, args) if err != nil { resp.Error = err.Error() @@ -523,7 +523,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository) { logger := logutil.NewCallbackLogger(func(ev *logutilpb.Event) { w.Write([]byte(logutil.EventString(ev))) }) - wr := wrangler.New(logger, ts, tmClient) + wr := wrangler.New(logger, ts, tmClient, actions.collationEnv, actions.parser) apiCallUUID, err := schema.CreateUUID() if err != nil { @@ -531,7 +531,7 @@ func initAPI(ctx context.Context, ts *topo.Server, actions *ActionRepository) { } requestContext := fmt.Sprintf("vtctld/api:%s", apiCallUUID) - executor := schemamanager.NewTabletExecutor(requestContext, wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), time.Duration(req.ReplicaTimeoutSeconds)*time.Second, 0) + executor := schemamanager.NewTabletExecutor(requestContext, wr.TopoServer(), wr.TabletManagerClient(), wr.Logger(), time.Duration(req.ReplicaTimeoutSeconds)*time.Second, 0, actions.parser) if err := executor.SetDDLStrategy(req.DDLStrategy); err != nil { return fmt.Errorf("error setting DDL strategy: %v", err) } diff --git a/go/vt/vtctld/api_test.go b/go/vt/vtctld/api_test.go index 45d75752afd..780ff26e6ff 100644 --- a/go/vt/vtctld/api_test.go +++ b/go/vt/vtctld/api_test.go @@ -27,6 +27,10 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/servenv/testutils" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/wrangler" @@ -47,7 +51,7 @@ func TestAPI(t *testing.T) { cells := []string{"cell1", "cell2"} ts := memorytopo.NewServer(ctx, cells...) 
defer ts.Close() - actionRepo := NewActionRepository(ts) + actionRepo := NewActionRepository(ts, collations.MySQL8(), sqlparser.NewTestParser()) server := testutils.HTTPTestServer() defer server.Close() diff --git a/go/vt/vtctld/tablet_data_test.go b/go/vt/vtctld/tablet_data_test.go index d40c6647ef3..12df3b27c6a 100644 --- a/go/vt/vtctld/tablet_data_test.go +++ b/go/vt/vtctld/tablet_data_test.go @@ -25,6 +25,10 @@ import ( "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/logutil" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -112,7 +116,7 @@ func TestTabletData(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") defer ts.Close() - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) if err := ts.CreateKeyspace(context.Background(), "ks", &topodatapb.Keyspace{}); err != nil { t.Fatalf("CreateKeyspace failed: %v", err) diff --git a/go/vt/vtctld/vtctld.go b/go/vt/vtctld/vtctld.go index ab9cf24c9a5..8093ded1371 100644 --- a/go/vt/vtctld/vtctld.go +++ b/go/vt/vtctld/vtctld.go @@ -23,6 +23,10 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/acl" @@ -48,8 +52,8 @@ func registerVtctldFlags(fs *pflag.FlagSet) { } // InitVtctld initializes all the vtctld functionality. 
-func InitVtctld(ts *topo.Server) error { - actionRepo := NewActionRepository(ts) +func InitVtctld(ts *topo.Server, collationEnv *collations.Environment, parser *sqlparser.Parser) error { + actionRepo := NewActionRepository(ts, collationEnv, parser) // keyspace actions actionRepo.RegisterKeyspaceAction("ValidateKeyspace", diff --git a/go/vt/vtexplain/vtexplain.go b/go/vt/vtexplain/vtexplain.go index b15d5d2af3a..8f7cec502c2 100644 --- a/go/vt/vtexplain/vtexplain.go +++ b/go/vt/vtexplain/vtexplain.go @@ -28,6 +28,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/vtgate" @@ -144,6 +146,9 @@ type ( // time simulator batchTime *sync2.Batcher globalTabletEnv *tabletEnv + + collationEnv *collations.Environment + parser *sqlparser.Parser } ) @@ -179,25 +184,29 @@ type TabletActions struct { } // Init sets up the fake execution environment -func Init(ctx context.Context, vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options) (*VTExplain, error) { +func Init(ctx context.Context, vSchemaStr, sqlSchema, ksShardMapStr string, opts *Options, collationEnv *collations.Environment, parser *sqlparser.Parser) (*VTExplain, error) { // Verify options if opts.ReplicationMode != "ROW" && opts.ReplicationMode != "STATEMENT" { return nil, fmt.Errorf("invalid replication mode \"%s\"", opts.ReplicationMode) } - parsedDDLs, err := parseSchema(sqlSchema, opts) + parsedDDLs, err := parseSchema(sqlSchema, opts, parser) if err != nil { return nil, fmt.Errorf("parseSchema: %v", err) } - tabletEnv, err := newTabletEnvironment(parsedDDLs, opts) + tabletEnv, err := newTabletEnvironment(parsedDDLs, opts, collationEnv) if err != nil { return nil, fmt.Errorf("initTabletEnvironment: %v", err) } - vte := &VTExplain{vtgateSession: &vtgatepb.Session{ - TargetString: "", - Autocommit: true, - }} + vte := &VTExplain{ + vtgateSession: &vtgatepb.Session{ + TargetString: "", + 
Autocommit: true, + }, + collationEnv: collationEnv, + parser: parser, + } vte.setGlobalTabletEnv(tabletEnv) err = vte.initVtgateExecutor(ctx, vSchemaStr, ksShardMapStr, opts) if err != nil { @@ -225,10 +234,10 @@ func (vte *VTExplain) Stop() { } } -func parseSchema(sqlSchema string, opts *Options) ([]sqlparser.DDLStatement, error) { +func parseSchema(sqlSchema string, opts *Options, parser *sqlparser.Parser) ([]sqlparser.DDLStatement, error) { parsedDDLs := make([]sqlparser.DDLStatement, 0, 16) for { - sql, rem, err := sqlparser.SplitStatement(sqlSchema) + sql, rem, err := parser.SplitStatement(sqlSchema) sqlSchema = rem if err != nil { return nil, err @@ -243,12 +252,12 @@ func parseSchema(sqlSchema string, opts *Options) ([]sqlparser.DDLStatement, err var stmt sqlparser.Statement if opts.StrictDDL { - stmt, err = sqlparser.ParseStrictDDL(sql) + stmt, err = parser.ParseStrictDDL(sql) if err != nil { return nil, err } } else { - stmt, err = sqlparser.Parse(sql) + stmt, err = parser.Parse(sql) if err != nil { log.Errorf("ERROR: failed to parse sql: %s, got error: %v", sql, err) continue @@ -292,7 +301,7 @@ func (vte *VTExplain) Run(sql string) ([]*Explain, error) { sql = s } - sql, rem, err = sqlparser.SplitStatement(sql) + sql, rem, err = vte.parser.SplitStatement(sql) if err != nil { return nil, err } @@ -379,7 +388,7 @@ func (vte *VTExplain) specialHandlingOfSavepoints(q *MysqlQuery) error { return nil } - stmt, err := sqlparser.Parse(q.SQL) + stmt, err := vte.parser.Parse(q.SQL) if err != nil { return err } diff --git a/go/vt/vtexplain/vtexplain_test.go b/go/vt/vtexplain/vtexplain_test.go index 54f1efbc522..257b1d38406 100644 --- a/go/vt/vtexplain/vtexplain_test.go +++ b/go/vt/vtexplain/vtexplain_test.go @@ -28,6 +28,10 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/key" @@ -65,7 +69,7 @@ 
func initTest(ctx context.Context, mode string, opts *Options, topts *testopts, } opts.ExecutionMode = mode - vte, err := Init(ctx, string(vSchema), string(schema), shardmap, opts) + vte, err := Init(ctx, string(vSchema), string(schema), shardmap, opts, collations.MySQL8(), sqlparser.NewTestParser()) require.NoError(t, err, "vtexplain Init error\n%s", string(schema)) return vte } @@ -344,7 +348,7 @@ func TestInit(t *testing.T) { } }` schema := "create table table_missing_primary_vindex (id int primary key)" - _, err := Init(ctx, vschema, schema, "", defaultTestOpts()) + _, err := Init(ctx, vschema, schema, "", defaultTestOpts(), collations.MySQL8(), sqlparser.NewTestParser()) require.Error(t, err) require.Contains(t, err.Error(), "missing primary col vindex") } diff --git a/go/vt/vtexplain/vtexplain_vtgate.go b/go/vt/vtexplain/vtexplain_vtgate.go index 8167c510b01..80994ef9474 100644 --- a/go/vt/vtexplain/vtexplain_vtgate.go +++ b/go/vt/vtexplain/vtexplain_vtgate.go @@ -75,7 +75,7 @@ func (vte *VTExplain) initVtgateExecutor(ctx context.Context, vSchemaStr, ksShar var schemaTracker vtgate.SchemaInfo // no schema tracker for these tests queryLogBufferSize := 10 plans := theine.NewStore[vtgate.PlanCacheKey, *engine.Plan](4*1024*1024, false) - vte.vtgateExecutor = vtgate.NewExecutor(ctx, vte.explainTopo, vtexplainCell, resolver, opts.Normalize, false, streamSize, plans, schemaTracker, false, opts.PlannerVersion, 0) + vte.vtgateExecutor = vtgate.NewExecutor(ctx, vte.explainTopo, vtexplainCell, resolver, opts.Normalize, false, streamSize, plans, schemaTracker, false, opts.PlannerVersion, 0, vte.collationEnv, vte.parser) vte.vtgateExecutor.SetQueryLogger(streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize)) return nil @@ -107,7 +107,7 @@ func (vte *VTExplain) buildTopology(ctx context.Context, opts *Options, vschemaS if err != nil { return err } - schema := vindexes.BuildVSchema(&srvVSchema) + schema := vindexes.BuildVSchema(&srvVSchema, vte.parser) for ks, 
ksSchema := range schema.Keyspaces { if ksSchema.Error != nil { return vterrors.Wrapf(ksSchema.Error, "vschema failed to load on keyspace [%s]", ks) @@ -143,7 +143,7 @@ func (vte *VTExplain) buildTopology(ctx context.Context, opts *Options, vschemaS log.Infof("registering test tablet %s for keyspace %s shard %s", hostname, ks, shard.Name) tablet := vte.healthCheck.AddFakeTablet(vtexplainCell, hostname, 1, ks, shard.Name, topodatapb.TabletType_PRIMARY, true, 1, nil, func(t *topodatapb.Tablet) queryservice.QueryService { - return vte.newTablet(ctx, opts, t) + return vte.newTablet(ctx, opts, t, vte.collationEnv, vte.parser) }) vte.explainTopo.TabletConns[hostname] = tablet.(*explainTablet) vte.explainTopo.KeyspaceShards[ks][shard.Name] = shard diff --git a/go/vt/vtexplain/vtexplain_vttablet.go b/go/vt/vtexplain/vtexplain_vttablet.go index 85aa64037a7..fa01fc319b4 100644 --- a/go/vt/vtexplain/vtexplain_vttablet.go +++ b/go/vt/vtexplain/vtexplain_vttablet.go @@ -98,13 +98,15 @@ type explainTablet struct { mysqlQueries []*MysqlQuery currentTime int vte *VTExplain + + collationEnv *collations.Environment } var _ queryservice.QueryService = (*explainTablet)(nil) -func (vte *VTExplain) newTablet(ctx context.Context, opts *Options, t *topodatapb.Tablet) *explainTablet { +func (vte *VTExplain) newTablet(ctx context.Context, opts *Options, t *topodatapb.Tablet, collationEnv *collations.Environment, parser *sqlparser.Parser) *explainTablet { db := fakesqldb.New(nil) - sidecardb.AddSchemaInitQueries(db, true) + sidecardb.AddSchemaInitQueries(db, true, vte.parser) config := tabletenv.NewCurrentConfig() config.TrackSchemaVersions = false @@ -117,9 +119,9 @@ func (vte *VTExplain) newTablet(ctx context.Context, opts *Options, t *topodatap config.EnableTableGC = false // XXX much of this is cloned from the tabletserver tests - tsv := tabletserver.NewTabletServer(ctx, topoproto.TabletAliasString(t.Alias), config, memorytopo.NewServer(ctx, ""), t.Alias) + tsv := 
tabletserver.NewTabletServer(ctx, topoproto.TabletAliasString(t.Alias), config, memorytopo.NewServer(ctx, ""), t.Alias, collationEnv, parser) - tablet := explainTablet{db: db, tsv: tsv, vte: vte} + tablet := explainTablet{db: db, tsv: tsv, vte: vte, collationEnv: collationEnv} db.Handler = &tablet tablet.QueryService = queryservice.Wrap( @@ -129,7 +131,7 @@ func (vte *VTExplain) newTablet(ctx context.Context, opts *Options, t *topodatap }, ) - params, _ := db.ConnParams().MysqlParams() + params := db.ConnParams() cp := *params dbcfgs := dbconfigs.NewTestDBConfigs(cp, cp, "") cnf := mysqlctl.NewMycnf(22222, 6802) @@ -280,7 +282,7 @@ func (t *explainTablet) Close(ctx context.Context) error { return t.tsv.Close(ctx) } -func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tabletEnv, error) { +func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options, collationEnv *collations.Environment) (*tabletEnv, error) { tEnv := newTabletEnv() schemaQueries := map[string]*sqltypes.Result{ "select unix_timestamp()": { @@ -479,7 +481,7 @@ func newTabletEnvironment(ddls []sqlparser.DDLStatement, opts *Options) (*tablet colType := &querypb.Field{ Name: "column_type", Type: sqltypes.VarChar, - Charset: uint32(collations.Default()), + Charset: uint32(collationEnv.DefaultConnectionCharset()), } colTypes = append(colTypes, colType) for _, col := range ddl.GetTableSpec().Columns { @@ -581,7 +583,7 @@ func (t *explainTablet) handleSelect(query string) (*sqltypes.Result, error) { // Parse the select statement to figure out the table and columns // that were referenced so that the synthetic response has the // expected field names and types. 
- stmt, err := sqlparser.Parse(query) + stmt, err := t.vte.parser.Parse(query) if err != nil { return nil, err } @@ -646,7 +648,7 @@ func (t *explainTablet) handleSelect(query string) (*sqltypes.Result, error) { rows := make([][]sqltypes.Value, 0, rowCount) for i, col := range colNames { colType := colTypes[i] - cs := collations.DefaultCollationForType(colType) + cs := collations.CollationForType(colType, t.collationEnv.DefaultConnectionCharset()) fields[i] = &querypb.Field{ Name: col, Type: colType, @@ -734,7 +736,7 @@ func (t *explainTablet) analyzeWhere(selStmt *sqlparser.Select, tableColumnMap m // Check if we have a duplicate value isNewValue := true for _, v := range inVal { - result, err := evalengine.NullsafeCompare(v, value, collations.Default()) + result, err := evalengine.NullsafeCompare(v, value, t.collationEnv, t.collationEnv.DefaultConnectionCharset()) if err != nil { return "", nil, 0, nil, err } diff --git a/go/vt/vtexplain/vtexplain_vttablet_test.go b/go/vt/vtexplain/vtexplain_vttablet_test.go index 614ad186224..601df4b8e79 100644 --- a/go/vt/vtexplain/vtexplain_vttablet_test.go +++ b/go/vt/vtexplain/vtexplain_vttablet_test.go @@ -24,6 +24,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -70,7 +74,9 @@ create table t2 ( ctx, cancel := context.WithCancel(context.Background()) defer cancel() - vte, err := Init(ctx, testVSchema, testSchema, "", opts) + collationEnv := collations.MySQL8() + parser := sqlparser.NewTestParser() + vte, err := Init(ctx, testVSchema, testSchema, "", opts, collationEnv, parser) require.NoError(t, err) defer vte.Stop() @@ -117,8 +123,9 @@ create table test_partitioned ( PARTITION p2018_06_16 VALUES LESS THAN (1529132400) ENGINE = InnoDB, PARTITION p2018_06_17 VALUES LESS THAN (1529218800) ENGINE = 
InnoDB)*/; ` - - ddls, err := parseSchema(testSchema, &Options{StrictDDL: false}) + collationEnv := collations.MySQL8() + parser := sqlparser.NewTestParser() + ddls, err := parseSchema(testSchema, &Options{StrictDDL: false}, parser) if err != nil { t.Fatalf("parseSchema: %v", err) } @@ -128,14 +135,14 @@ create table test_partitioned ( vte := initTest(ctx, ModeMulti, defaultTestOpts(), &testopts{}, t) defer vte.Stop() - tabletEnv, _ := newTabletEnvironment(ddls, defaultTestOpts()) + tabletEnv, _ := newTabletEnvironment(ddls, defaultTestOpts(), collationEnv) vte.setGlobalTabletEnv(tabletEnv) tablet := vte.newTablet(ctx, defaultTestOpts(), &topodatapb.Tablet{ Keyspace: "test_keyspace", Shard: "-80", Alias: &topodatapb.TabletAlias{}, - }) + }, collationEnv, parser) se := tablet.tsv.SchemaEngine() tables := se.GetSchema() @@ -181,9 +188,9 @@ create table test_partitioned ( func TestErrParseSchema(t *testing.T) { testSchema := `create table t1 like t2` - ddl, err := parseSchema(testSchema, &Options{StrictDDL: true}) + ddl, err := parseSchema(testSchema, &Options{StrictDDL: true}, sqlparser.NewTestParser()) require.NoError(t, err) - _, err = newTabletEnvironment(ddl, defaultTestOpts()) + _, err = newTabletEnvironment(ddl, defaultTestOpts(), collations.MySQL8()) require.Error(t, err, "check your schema, table[t2] doesn't exist") } diff --git a/go/vt/vtgate/buffer/flags.go b/go/vt/vtgate/buffer/flags.go index a17cc09ccc3..b45f10a6e38 100644 --- a/go/vt/vtgate/buffer/flags.go +++ b/go/vt/vtgate/buffer/flags.go @@ -162,16 +162,6 @@ func NewDefaultConfig() *Config { } } -// EnableBuffering is used in tests where we require the keyspace event watcher to be created -func EnableBuffering() { - bufferEnabled = true -} - -// DisableBuffering is the counterpart of EnableBuffering -func DisableBuffering() { - bufferEnabled = false -} - func NewConfigFromFlags() *Config { if err := verifyFlags(); err != nil { log.Fatalf("Invalid buffer configuration: %v", err) diff --git 
a/go/vt/vtgate/engine/aggregations.go b/go/vt/vtgate/engine/aggregations.go index 0a72e263e21..96e8cc294a9 100644 --- a/go/vt/vtgate/engine/aggregations.go +++ b/go/vt/vtgate/engine/aggregations.go @@ -49,14 +49,17 @@ type AggregateParams struct { // This is based on the function passed in the select expression and // not what we use to aggregate at the engine primitive level. OrigOpcode AggregateOpcode + + CollationEnv *collations.Environment } -func NewAggregateParam(opcode AggregateOpcode, col int, alias string) *AggregateParams { +func NewAggregateParam(opcode AggregateOpcode, col int, alias string, collationEnv *collations.Environment) *AggregateParams { out := &AggregateParams{ - Opcode: opcode, - Col: col, - Alias: alias, - WCol: -1, + Opcode: opcode, + Col: col, + Alias: alias, + WCol: -1, + CollationEnv: collationEnv, } if opcode.NeedsComparableValues() { out.KeyCol = col @@ -74,7 +77,7 @@ func (ap *AggregateParams) String() string { keyCol = fmt.Sprintf("%s|%d", keyCol, ap.WCol) } if sqltypes.IsText(ap.Type.Type()) && ap.Type.Collation() != collations.Unknown { - keyCol += " COLLATE " + collations.Local().LookupName(ap.Type.Collation()) + keyCol += " COLLATE " + ap.CollationEnv.LookupName(ap.Type.Collation()) } dispOrigOp := "" if ap.OrigOpcode != AggregateUnassigned && ap.OrigOpcode != ap.Opcode { @@ -100,9 +103,10 @@ type aggregator interface { } type aggregatorDistinct struct { - column int - last sqltypes.Value - coll collations.ID + column int + last sqltypes.Value + coll collations.ID + collationEnv *collations.Environment } func (a *aggregatorDistinct) shouldReturn(row []sqltypes.Value) (bool, error) { @@ -111,7 +115,7 @@ func (a *aggregatorDistinct) shouldReturn(row []sqltypes.Value) (bool, error) { next := row[a.column] if !last.IsNull() { if last.TinyWeightCmp(next) == 0 { - cmp, err := evalengine.NullsafeCompare(last, next, a.coll) + cmp, err := evalengine.NullsafeCompare(last, next, a.collationEnv, a.coll) if err != nil { return true, err } @@ 
-379,8 +383,9 @@ func newAggregation(fields []*querypb.Field, aggregates []*AggregateParams) (agg ag = &aggregatorCount{ from: aggr.Col, distinct: aggregatorDistinct{ - column: distinct, - coll: aggr.Type.Collation(), + column: distinct, + coll: aggr.Type.Collation(), + collationEnv: aggr.CollationEnv, }, } @@ -397,8 +402,9 @@ func newAggregation(fields []*querypb.Field, aggregates []*AggregateParams) (agg from: aggr.Col, sum: sum, distinct: aggregatorDistinct{ - column: distinct, - coll: aggr.Type.Collation(), + column: distinct, + coll: aggr.Type.Collation(), + collationEnv: aggr.CollationEnv, }, } @@ -406,7 +412,7 @@ func newAggregation(fields []*querypb.Field, aggregates []*AggregateParams) (agg ag = &aggregatorMin{ aggregatorMinMax{ from: aggr.Col, - minmax: evalengine.NewAggregationMinMax(sourceType, aggr.Type.Collation()), + minmax: evalengine.NewAggregationMinMax(sourceType, aggr.CollationEnv, aggr.Type.Collation()), }, } @@ -414,7 +420,7 @@ func newAggregation(fields []*querypb.Field, aggregates []*AggregateParams) (agg ag = &aggregatorMax{ aggregatorMinMax{ from: aggr.Col, - minmax: evalengine.NewAggregationMinMax(sourceType, aggr.Type.Collation()), + minmax: evalengine.NewAggregationMinMax(sourceType, aggr.CollationEnv, aggr.Type.Collation()), }, } diff --git a/go/vt/vtgate/engine/cached_size.go b/go/vt/vtgate/engine/cached_size.go index 6da8b1b56d4..93c5e97bd89 100644 --- a/go/vt/vtgate/engine/cached_size.go +++ b/go/vt/vtgate/engine/cached_size.go @@ -35,7 +35,7 @@ func (cached *AggregateParams) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(96) + size += int64(112) } // field Alias string size += hack.RuntimeAllocSize(int64(len(cached.Alias))) @@ -45,6 +45,8 @@ func (cached *AggregateParams) CachedSize(alloc bool) int64 { } // field Original *vitess.io/vitess/go/vt/sqlparser.AliasedExpr size += cached.Original.CachedSize(true) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += 
cached.CollationEnv.CachedSize(true) return size } func (cached *AlterVSchema) CachedSize(alloc bool) int64 { @@ -67,10 +69,12 @@ func (cached *CheckCol) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(32) + size += int64(48) } // field WsCol *int size += hack.RuntimeAllocSize(int64(8)) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } @@ -199,7 +203,7 @@ func (cached *Distinct) CachedSize(alloc bool) int64 { } // field CheckCols []vitess.io/vitess/go/vt/vtgate/engine.CheckCol { - size += hack.RuntimeAllocSize(int64(cap(cached.CheckCols)) * int64(32)) + size += hack.RuntimeAllocSize(int64(cap(cached.CheckCols)) * int64(40)) for _, elem := range cached.CheckCols { size += elem.CachedSize(false) } @@ -352,6 +356,8 @@ func (cached *GroupByParams) CachedSize(alloc bool) int64 { if cc, ok := cached.Expr.(cachedObject); ok { size += cc.CachedSize(true) } + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } func (cached *HashJoin) CachedSize(alloc bool) int64 { @@ -378,6 +384,8 @@ func (cached *HashJoin) CachedSize(alloc bool) int64 { if cc, ok := cached.ASTPred.(cachedObject); ok { size += cc.CachedSize(true) } + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } func (cached *Insert) CachedSize(alloc bool) int64 { @@ -617,7 +625,10 @@ func (cached *MemorySort) CachedSize(alloc bool) int64 { } // field OrderBy vitess.io/vitess/go/vt/vtgate/evalengine.Comparison { - size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(36)) + size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(48)) + for _, elem := range cached.OrderBy { + size += elem.CachedSize(false) + } } // field Input vitess.io/vitess/go/vt/vtgate/engine.Primitive if cc, ok := cached.Input.(cachedObject); ok { @@ -644,7 
+655,10 @@ func (cached *MergeSort) CachedSize(alloc bool) int64 { } // field OrderBy vitess.io/vitess/go/vt/vtgate/evalengine.Comparison { - size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(36)) + size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(48)) + for _, elem := range cached.OrderBy { + size += elem.CachedSize(false) + } } return size } @@ -710,6 +724,8 @@ func (cached *OrderedAggregate) CachedSize(alloc bool) int64 { if cc, ok := cached.Input.(cachedObject); ok { size += cc.CachedSize(true) } + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } func (cached *Plan) CachedSize(alloc bool) int64 { @@ -849,7 +865,10 @@ func (cached *Route) CachedSize(alloc bool) int64 { size += hack.RuntimeAllocSize(int64(len(cached.FieldQuery))) // field OrderBy vitess.io/vitess/go/vt/vtgate/evalengine.Comparison { - size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(36)) + size += hack.RuntimeAllocSize(int64(cap(cached.OrderBy)) * int64(48)) + for _, elem := range cached.OrderBy { + size += elem.CachedSize(false) + } } // field RoutingParameters *vitess.io/vitess/go/vt/vtgate/engine.RoutingParameters size += cached.RoutingParameters.CachedSize(true) diff --git a/go/vt/vtgate/engine/distinct.go b/go/vt/vtgate/engine/distinct.go index c7d6742c136..6eea40d281a 100644 --- a/go/vt/vtgate/engine/distinct.go +++ b/go/vt/vtgate/engine/distinct.go @@ -39,14 +39,16 @@ type ( Truncate int } CheckCol struct { - Col int - WsCol *int - Type evalengine.Type + Col int + WsCol *int + Type evalengine.Type + CollationEnv *collations.Environment } probeTable struct { - seenRows map[evalengine.HashCode][]sqltypes.Row - checkCols []CheckCol - sqlmode evalengine.SQLMode + seenRows map[evalengine.HashCode][]sqltypes.Row + checkCols []CheckCol + sqlmode evalengine.SQLMode + collationEnv *collations.Environment } ) @@ -139,7 +141,7 @@ func (pt *probeTable) 
hashCodeForRow(inputRow sqltypes.Row) (evalengine.HashCode func (pt *probeTable) equal(a, b sqltypes.Row) (bool, error) { for i, checkCol := range pt.checkCols { - cmp, err := evalengine.NullsafeCompare(a[i], b[i], checkCol.Type.Collation()) + cmp, err := evalengine.NullsafeCompare(a[i], b[i], pt.collationEnv, checkCol.Type.Collation()) if err != nil { _, isCollErr := err.(evalengine.UnsupportedCollationError) if !isCollErr || checkCol.WsCol == nil { @@ -147,7 +149,7 @@ func (pt *probeTable) equal(a, b sqltypes.Row) (bool, error) { } checkCol = checkCol.SwitchToWeightString() pt.checkCols[i] = checkCol - cmp, err = evalengine.NullsafeCompare(a[i], b[i], checkCol.Type.Collation()) + cmp, err = evalengine.NullsafeCompare(a[i], b[i], pt.collationEnv, checkCol.Type.Collation()) if err != nil { return false, err } @@ -159,12 +161,13 @@ func (pt *probeTable) equal(a, b sqltypes.Row) (bool, error) { return true, nil } -func newProbeTable(checkCols []CheckCol) *probeTable { +func newProbeTable(checkCols []CheckCol, collationEnv *collations.Environment) *probeTable { cols := make([]CheckCol, len(checkCols)) copy(cols, checkCols) return &probeTable{ - seenRows: map[evalengine.HashCode][]sqltypes.Row{}, - checkCols: cols, + seenRows: map[evalengine.HashCode][]sqltypes.Row{}, + checkCols: cols, + collationEnv: collationEnv, } } @@ -180,7 +183,7 @@ func (d *Distinct) TryExecute(ctx context.Context, vcursor VCursor, bindVars map InsertID: input.InsertID, } - pt := newProbeTable(d.CheckCols) + pt := newProbeTable(d.CheckCols, vcursor.CollationEnv()) for _, row := range input.Rows { exists, err := pt.exists(row) @@ -201,7 +204,7 @@ func (d *Distinct) TryExecute(ctx context.Context, vcursor VCursor, bindVars map func (d *Distinct) TryStreamExecute(ctx context.Context, vcursor VCursor, bindVars map[string]*querypb.BindVariable, wantfields bool, callback func(*sqltypes.Result) error) error { var mu sync.Mutex - pt := newProbeTable(d.CheckCols) + pt := newProbeTable(d.CheckCols, 
vcursor.CollationEnv()) err := vcursor.StreamExecutePrimitive(ctx, d.Source, bindVars, wantfields, func(input *sqltypes.Result) error { result := &sqltypes.Result{ Fields: input.Fields, @@ -277,16 +280,17 @@ func (d *Distinct) description() PrimitiveDescription { // SwitchToWeightString returns a new CheckCol that works on the weight string column instead func (cc CheckCol) SwitchToWeightString() CheckCol { return CheckCol{ - Col: *cc.WsCol, - WsCol: nil, - Type: evalengine.NewType(sqltypes.VarBinary, collations.CollationBinaryID), + Col: *cc.WsCol, + WsCol: nil, + Type: evalengine.NewType(sqltypes.VarBinary, collations.CollationBinaryID), + CollationEnv: cc.CollationEnv, } } func (cc CheckCol) String() string { var collation string if sqltypes.IsText(cc.Type.Type()) && cc.Type.Collation() != collations.Unknown { - collation = ": " + collations.Local().LookupName(cc.Type.Collation()) + collation = ": " + cc.CollationEnv.LookupName(cc.Type.Collation()) } var column string diff --git a/go/vt/vtgate/engine/distinct_test.go b/go/vt/vtgate/engine/distinct_test.go index a1591403f92..76e46496e21 100644 --- a/go/vt/vtgate/engine/distinct_test.go +++ b/go/vt/vtgate/engine/distinct_test.go @@ -89,8 +89,9 @@ func TestDistinct(t *testing.T) { collID = collations.CollationBinaryID } checkCols = append(checkCols, CheckCol{ - Col: i, - Type: evalengine.NewTypeEx(tc.inputs.Fields[i].Type, collID, false, 0, 0), + Col: i, + Type: evalengine.NewTypeEx(tc.inputs.Fields[i].Type, collID, false, 0, 0), + CollationEnv: collations.MySQL8(), }) } } diff --git a/go/vt/vtgate/engine/fake_vcursor_test.go b/go/vt/vtgate/engine/fake_vcursor_test.go index ae1c9e918a7..b8e1c911ebf 100644 --- a/go/vt/vtgate/engine/fake_vcursor_test.go +++ b/go/vt/vtgate/engine/fake_vcursor_test.go @@ -129,7 +129,17 @@ func (t *noopVCursor) SetContextWithValue(key, value interface{}) func() { // ConnCollation implements VCursor func (t *noopVCursor) ConnCollation() collations.ID { - return collations.Default() + 
return collations.MySQL8().DefaultConnectionCharset() +} + +// CollationEnv implements VCursor +func (t *noopVCursor) CollationEnv() *collations.Environment { + return collations.MySQL8() +} + +// SQLParser implements VCursor +func (t *noopVCursor) SQLParser() *sqlparser.Parser { + return sqlparser.NewTestParser() } func (t *noopVCursor) TimeZone() *time.Location { @@ -412,6 +422,8 @@ type loggingVCursor struct { ksShardMap map[string][]string shardSession []*srvtopo.ResolvedShard + + parser *sqlparser.Parser } func (f *loggingVCursor) HasCreatedTempTable() { @@ -798,13 +810,21 @@ func (f *loggingVCursor) nextResult() (*sqltypes.Result, error) { } func (f *loggingVCursor) CanUseSetVar() bool { - useSetVar := sqlparser.IsMySQL80AndAbove() && !f.disableSetVar + useSetVar := f.SQLParser().IsMySQL80AndAbove() && !f.disableSetVar if useSetVar { f.log = append(f.log, "SET_VAR can be used") } return useSetVar } +// SQLParser implements VCursor +func (t *loggingVCursor) SQLParser() *sqlparser.Parser { + if t.parser == nil { + return sqlparser.NewTestParser() + } + return t.parser +} + func (t *noopVCursor) VExplainLogging() {} func (t *noopVCursor) DisableLogging() {} func (t *noopVCursor) GetVExplainLogs() []ExecuteEntry { diff --git a/go/vt/vtgate/engine/filter_test.go b/go/vt/vtgate/engine/filter_test.go index 7d3c2cd0696..4db73c3049f 100644 --- a/go/vt/vtgate/engine/filter_test.go +++ b/go/vt/vtgate/engine/filter_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -70,6 +71,7 @@ func TestFilterPass(t *testing.T) { pred, err := evalengine.Translate(predicate, &evalengine.Config{ Collation: utf8mb4Bin, ResolveColumn: evalengine.FieldResolver(tc.res.Fields).Column, + CollationEnv: collations.MySQL8(), }) require.NoError(t, err) @@ -126,6 +128,7 @@ func TestFilterStreaming(t *testing.T) { pred, err := 
evalengine.Translate(predicate, &evalengine.Config{ Collation: utf8mb4Bin, ResolveColumn: evalengine.FieldResolver(tc.res[0].Fields).Column, + CollationEnv: collations.MySQL8(), }) require.NoError(t, err) diff --git a/go/vt/vtgate/engine/hash_join.go b/go/vt/vtgate/engine/hash_join.go index 4e305e8b59d..f7c9d87e1fb 100644 --- a/go/vt/vtgate/engine/hash_join.go +++ b/go/vt/vtgate/engine/hash_join.go @@ -65,6 +65,8 @@ type ( // collation and type are used to hash the incoming values correctly Collation collations.ID ComparisonType querypb.Type + + CollationEnv *collations.Environment } hashJoinProbeTable struct { @@ -249,7 +251,7 @@ func (hj *HashJoin) description() PrimitiveDescription { } coll := hj.Collation if coll != collations.Unknown { - other["Collation"] = collations.Local().LookupName(coll) + other["Collation"] = hj.CollationEnv.LookupName(coll) } return PrimitiveDescription{ OperatorType: "Join", diff --git a/go/vt/vtgate/engine/hash_join_test.go b/go/vt/vtgate/engine/hash_join_test.go index 7f7275a2c5c..d3271c643be 100644 --- a/go/vt/vtgate/engine/hash_join_test.go +++ b/go/vt/vtgate/engine/hash_join_test.go @@ -132,7 +132,7 @@ func TestHashJoinVariations(t *testing.T) { expected := sqltypes.MakeTestResult(fields, tc.expected...) 
- typ, err := evalengine.CoerceTypes(typeForOffset(tc.lhs), typeForOffset(tc.rhs)) + typ, err := evalengine.CoerceTypes(typeForOffset(tc.lhs), typeForOffset(tc.rhs), collations.MySQL8()) require.NoError(t, err) jn := &HashJoin{ @@ -142,6 +142,7 @@ func TestHashJoinVariations(t *testing.T) { RHSKey: tc.rhs, Collation: typ.Collation(), ComparisonType: typ.Type(), + CollationEnv: collations.MySQL8(), } t.Run(tc.name, func(t *testing.T) { @@ -166,7 +167,7 @@ func typeForOffset(i int) evalengine.Type { case 0: return evalengine.NewType(sqltypes.Int64, collations.CollationBinaryID) case 1: - return evalengine.NewType(sqltypes.VarChar, collations.Default()) + return evalengine.NewType(sqltypes.VarChar, collations.MySQL8().DefaultConnectionCharset()) default: panic(i) } diff --git a/go/vt/vtgate/engine/insert_test.go b/go/vt/vtgate/engine/insert_test.go index 217492a529f..e870ffa18c0 100644 --- a/go/vt/vtgate/engine/insert_test.go +++ b/go/vt/vtgate/engine/insert_test.go @@ -194,7 +194,7 @@ func TestInsertShardedSimple(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] // A single row insert should be autocommitted @@ -336,7 +336,7 @@ func TestInsertShardWithONDuplicateKey(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] // A single row insert should be autocommitted @@ -489,7 +489,7 @@ func TestInsertShardedFail(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -539,7 +539,7 @@ func TestInsertShardedGenerate(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -658,7 +658,7 @@ func 
TestInsertShardedOwned(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -768,7 +768,7 @@ func TestInsertShardedOwnedWithNull(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -845,7 +845,7 @@ func TestInsertShardedGeo(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -951,7 +951,7 @@ func TestInsertShardedIgnoreOwned(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1107,7 +1107,7 @@ func TestInsertShardedIgnoreOwnedWithNull(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1205,7 +1205,7 @@ func TestInsertShardedUnownedVerify(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1333,7 +1333,7 @@ func TestInsertShardedIgnoreUnownedVerify(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1439,7 +1439,7 @@ func TestInsertShardedIgnoreUnownedVerifyFail(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1516,7 +1516,7 @@ func TestInsertShardedUnownedReverseMap(t *testing.T) { }, }, } - vs := 
vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1630,7 +1630,7 @@ func TestInsertShardedUnownedReverseMapSuccess(t *testing.T) { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] ins := newInsert( @@ -1677,7 +1677,7 @@ func TestInsertSelectSimple(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] // A single row insert should be autocommitted @@ -1760,7 +1760,7 @@ func TestInsertSelectOwned(t *testing.T) { Name: "onecol", Columns: []string{"c3"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] rb := &Route{ @@ -1861,7 +1861,7 @@ func TestInsertSelectGenerate(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] rb := &Route{ @@ -1953,7 +1953,7 @@ func TestStreamingInsertSelectGenerate(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] rb := &Route{ @@ -2050,7 +2050,7 @@ func TestInsertSelectGenerateNotProvided(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] rb := &Route{ @@ -2137,7 +2137,7 @@ func TestStreamingInsertSelectGenerateNotProvided(t *testing.T) { Name: "hash", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, 
sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] rb := &Route{ @@ -2234,7 +2234,7 @@ func TestInsertSelectUnowned(t *testing.T) { Name: "onecol", Columns: []string{"id"}}}}}}}} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) ks := vs.Keyspaces["sharded"] rb := &Route{ @@ -2338,7 +2338,7 @@ func TestInsertSelectShardingCases(t *testing.T) { "uks2": {Tables: map[string]*vschemapb.Table{"u2": {}}}, }} - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) sks1 := vs.Keyspaces["sks1"] sks2 := vs.Keyspaces["sks2"] uks1 := vs.Keyspaces["uks1"] diff --git a/go/vt/vtgate/engine/memory_sort_test.go b/go/vt/vtgate/engine/memory_sort_test.go index 3ec7b247029..7647bbce057 100644 --- a/go/vt/vtgate/engine/memory_sort_test.go +++ b/go/vt/vtgate/engine/memory_sort_test.go @@ -225,7 +225,7 @@ func TestMemorySortStreamExecuteCollation(t *testing.T) { )}, } - collationID, _ := collations.Local().LookupID("utf8mb4_hu_0900_ai_ci") + collationID, _ := collations.MySQL8().LookupID("utf8mb4_hu_0900_ai_ci") ms := &MemorySort{ OrderBy: []evalengine.OrderByParams{{ Col: 0, @@ -313,7 +313,7 @@ func TestMemorySortExecuteCollation(t *testing.T) { )}, } - collationID, _ := collations.Local().LookupID("utf8mb4_hu_0900_ai_ci") + collationID, _ := collations.MySQL8().LookupID("utf8mb4_hu_0900_ai_ci") ms := &MemorySort{ OrderBy: []evalengine.OrderByParams{{ Col: 0, diff --git a/go/vt/vtgate/engine/merge_sort_test.go b/go/vt/vtgate/engine/merge_sort_test.go index 93a443691c9..6b383e12572 100644 --- a/go/vt/vtgate/engine/merge_sort_test.go +++ b/go/vt/vtgate/engine/merge_sort_test.go @@ -179,7 +179,7 @@ func TestMergeSortCollation(t *testing.T) { ), }} - collationID, _ := collations.Local().LookupID("utf8mb4_hu_0900_ai_ci") + collationID, _ := collations.MySQL8().LookupID("utf8mb4_hu_0900_ai_ci") orderBy := []evalengine.OrderByParams{{ Col: 0, Type: 
evalengine.NewType(sqltypes.VarChar, collationID), diff --git a/go/vt/vtgate/engine/online_ddl.go b/go/vt/vtgate/engine/online_ddl.go index c972fee66e9..62126da4d08 100644 --- a/go/vt/vtgate/engine/online_ddl.go +++ b/go/vt/vtgate/engine/online_ddl.go @@ -20,7 +20,6 @@ import ( "context" "fmt" - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" querypb "vitess.io/vitess/go/vt/proto/query" @@ -79,7 +78,7 @@ func (v *OnlineDDL) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma { Name: "uuid", Type: sqltypes.VarChar, - Charset: uint32(collations.Default()), + Charset: uint32(vcursor.CollationEnv().DefaultConnectionCharset()), }, }, Rows: [][]sqltypes.Value{}, @@ -90,7 +89,7 @@ func (v *OnlineDDL) TryExecute(ctx context.Context, vcursor VCursor, bindVars ma migrationContext = fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()) } onlineDDLs, err := schema.NewOnlineDDLs(v.GetKeyspaceName(), v.SQL, v.DDL, - v.DDLStrategySetting, migrationContext, "", + v.DDLStrategySetting, migrationContext, "", vcursor.SQLParser(), ) if err != nil { return result, err diff --git a/go/vt/vtgate/engine/ordered_aggregate.go b/go/vt/vtgate/engine/ordered_aggregate.go index 99393d8afc2..ade8cd00299 100644 --- a/go/vt/vtgate/engine/ordered_aggregate.go +++ b/go/vt/vtgate/engine/ordered_aggregate.go @@ -28,13 +28,6 @@ import ( "vitess.io/vitess/go/vt/vtgate/evalengine" ) -var ( - // Some predefined values - countZero = sqltypes.MakeTrusted(sqltypes.Int64, []byte("0")) - countOne = sqltypes.MakeTrusted(sqltypes.Int64, []byte("1")) - sumZero = sqltypes.MakeTrusted(sqltypes.Decimal, []byte("0")) -) - var _ Primitive = (*OrderedAggregate)(nil) // OrderedAggregate is a primitive that expects the underlying primitive @@ -58,6 +51,8 @@ type OrderedAggregate struct { // Input is the primitive that will feed into this Primitive. 
Input Primitive + + CollationEnv *collations.Environment } // GroupByParams specify the grouping key to be used. @@ -67,6 +62,7 @@ type GroupByParams struct { Expr sqlparser.Expr FromGroupBy bool Type evalengine.Type + CollationEnv *collations.Environment } // String returns a string. Used for plan descriptions @@ -79,7 +75,7 @@ func (gbp GroupByParams) String() string { } if sqltypes.IsText(gbp.Type.Type()) && gbp.Type.Collation() != collations.Unknown { - out += " COLLATE " + collations.Local().LookupName(gbp.Type.Collation()) + out += " COLLATE " + gbp.CollationEnv.LookupName(gbp.Type.Collation()) } return out @@ -348,14 +344,14 @@ func (oa *OrderedAggregate) nextGroupBy(currentKey, nextRow []sqltypes.Value) (n return nextRow, true, nil } - cmp, err := evalengine.NullsafeCompare(v1, v2, gb.Type.Collation()) + cmp, err := evalengine.NullsafeCompare(v1, v2, oa.CollationEnv, gb.Type.Collation()) if err != nil { _, isCollationErr := err.(evalengine.UnsupportedCollationError) if !isCollationErr || gb.WeightStringCol == -1 { return nil, false, err } gb.KeyCol = gb.WeightStringCol - cmp, err = evalengine.NullsafeCompare(currentKey[gb.WeightStringCol], nextRow[gb.WeightStringCol], gb.Type.Collation()) + cmp, err = evalengine.NullsafeCompare(currentKey[gb.WeightStringCol], nextRow[gb.WeightStringCol], oa.CollationEnv, gb.Type.Collation()) if err != nil { return nil, false, err } diff --git a/go/vt/vtgate/engine/ordered_aggregate_test.go b/go/vt/vtgate/engine/ordered_aggregate_test.go index f977e3b09dc..f90f22cd65d 100644 --- a/go/vt/vtgate/engine/ordered_aggregate_test.go +++ b/go/vt/vtgate/engine/ordered_aggregate_test.go @@ -62,7 +62,7 @@ func TestOrderedAggregateExecute(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -94,7 +94,7 @@ func 
TestOrderedAggregateExecuteTruncate(t *testing.T) { )}, } - aggr := NewAggregateParam(AggregateSum, 1, "") + aggr := NewAggregateParam(AggregateSum, 1, "", collations.MySQL8()) aggr.OrigOpcode = AggregateCountStar oa := &OrderedAggregate{ @@ -134,7 +134,7 @@ func TestMinMaxFailsCorrectly(t *testing.T) { )}, } - aggr := NewAggregateParam(AggregateMax, 0, "") + aggr := NewAggregateParam(AggregateMax, 0, "", collations.MySQL8()) aggr.WCol = 1 oa := &ScalarAggregate{ Aggregates: []*AggregateParams{aggr}, @@ -163,7 +163,7 @@ func TestOrderedAggregateStreamExecute(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -202,7 +202,7 @@ func TestOrderedAggregateStreamExecuteTruncate(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 2}}, TruncateColumnCount: 2, Input: fp, @@ -305,8 +305,8 @@ func TestOrderedAggregateExecuteCountDistinct(t *testing.T) { )}, } - aggr1 := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct col2)") - aggr2 := NewAggregateParam(AggregateSum, 2, "") + aggr1 := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct col2)", collations.MySQL8()) + aggr2 := NewAggregateParam(AggregateSum, 2, "", collations.MySQL8()) aggr2.OrigOpcode = AggregateCountStar oa := &OrderedAggregate{ Aggregates: []*AggregateParams{aggr1, aggr2}, @@ -374,12 +374,12 @@ func TestOrderedAggregateStreamCountDistinct(t *testing.T) { )}, } - aggr2 := NewAggregateParam(AggregateSum, 2, "") + aggr2 := NewAggregateParam(AggregateSum, 2, "", collations.MySQL8()) aggr2.OrigOpcode = AggregateCountDistinct oa := &OrderedAggregate{ Aggregates: []*AggregateParams{ 
- NewAggregateParam(AggregateCountDistinct, 1, "count(distinct col2)"), + NewAggregateParam(AggregateCountDistinct, 1, "count(distinct col2)", collations.MySQL8()), aggr2}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, @@ -460,8 +460,8 @@ func TestOrderedAggregateSumDistinctGood(t *testing.T) { oa := &OrderedAggregate{ Aggregates: []*AggregateParams{ - NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct col2)"), - NewAggregateParam(AggregateSum, 2, ""), + NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct col2)", collations.MySQL8()), + NewAggregateParam(AggregateSum, 2, "", collations.MySQL8()), }, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, @@ -504,7 +504,7 @@ func TestOrderedAggregateSumDistinctTolerateError(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct col2)")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct col2)", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -536,7 +536,7 @@ func TestOrderedAggregateKeysFail(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -566,7 +566,7 @@ func TestOrderedAggregateMergeFail(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -627,7 +627,7 @@ func TestOrderedAggregateExecuteGtid(t *testing.T) { } oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateGtid, 1, "vgtid")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateGtid, 1, "vgtid", 
collations.MySQL8())}, TruncateColumnCount: 2, Input: fp, } @@ -660,7 +660,7 @@ func TestCountDistinctOnVarchar(t *testing.T) { )}, } - aggr := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)") + aggr := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)", collations.MySQL8()) aggr.WCol = 2 oa := &OrderedAggregate{ Aggregates: []*AggregateParams{aggr}, @@ -720,7 +720,7 @@ func TestCountDistinctOnVarcharWithNulls(t *testing.T) { )}, } - aggr := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)") + aggr := NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)", collations.MySQL8()) aggr.WCol = 2 oa := &OrderedAggregate{ Aggregates: []*AggregateParams{aggr}, @@ -782,7 +782,7 @@ func TestSumDistinctOnVarcharWithNulls(t *testing.T) { )}, } - aggr := NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct c2)") + aggr := NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct c2)", collations.MySQL8()) aggr.WCol = 2 oa := &OrderedAggregate{ Aggregates: []*AggregateParams{aggr}, @@ -848,8 +848,8 @@ func TestMultiDistinct(t *testing.T) { oa := &OrderedAggregate{ Aggregates: []*AggregateParams{ - NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)"), - NewAggregateParam(AggregateSumDistinct, 2, "sum(distinct c3)"), + NewAggregateParam(AggregateCountDistinct, 1, "count(distinct c2)", collations.MySQL8()), + NewAggregateParam(AggregateSumDistinct, 2, "sum(distinct c3)", collations.MySQL8()), }, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, @@ -906,7 +906,7 @@ func TestOrderedAggregateCollate(t *testing.T) { collationID, _ := collationEnv.LookupID("utf8mb4_0900_ai_ci") oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0, Type: evalengine.NewType(sqltypes.Unknown, collationID)}}, Input: fp, } @@ -944,7 +944,7 @@ func 
TestOrderedAggregateCollateAS(t *testing.T) { collationID, _ := collationEnv.LookupID("utf8mb4_0900_as_ci") oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0, Type: evalengine.NewType(sqltypes.Unknown, collationID)}}, Input: fp, } @@ -984,7 +984,7 @@ func TestOrderedAggregateCollateKS(t *testing.T) { collationID, _ := collationEnv.LookupID("utf8mb4_ja_0900_as_cs_ks") oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateSum, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0, Type: evalengine.NewType(sqltypes.Unknown, collationID)}}, Input: fp, } @@ -1066,7 +1066,7 @@ func TestGroupConcatWithAggrOnEngine(t *testing.T) { t.Run(tcase.name, func(t *testing.T) { fp := &fakePrimitive{results: []*sqltypes.Result{tcase.inputResult}} oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateGroupConcat, 1, "group_concat(c2)")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateGroupConcat, 1, "group_concat(c2)", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } @@ -1145,7 +1145,7 @@ func TestGroupConcat(t *testing.T) { t.Run(tcase.name, func(t *testing.T) { fp := &fakePrimitive{results: []*sqltypes.Result{tcase.inputResult}} oa := &OrderedAggregate{ - Aggregates: []*AggregateParams{NewAggregateParam(AggregateGroupConcat, 1, "")}, + Aggregates: []*AggregateParams{NewAggregateParam(AggregateGroupConcat, 1, "", collations.MySQL8())}, GroupByKeys: []*GroupByParams{{KeyCol: 0}}, Input: fp, } diff --git a/go/vt/vtgate/engine/primitive.go b/go/vt/vtgate/engine/primitive.go index c69423ee3fb..a9627d358bc 100644 --- a/go/vt/vtgate/engine/primitive.go +++ b/go/vt/vtgate/engine/primitive.go @@ -87,6 +87,8 @@ type 
( Session() SessionActions ConnCollation() collations.ID + CollationEnv() *collations.Environment + SQLParser() *sqlparser.Parser TimeZone() *time.Location SQLMode() string diff --git a/go/vt/vtgate/engine/projection_test.go b/go/vt/vtgate/engine/projection_test.go index 2d260e901ea..d208fb2ec67 100644 --- a/go/vt/vtgate/engine/projection_test.go +++ b/go/vt/vtgate/engine/projection_test.go @@ -38,7 +38,10 @@ func TestMultiply(t *testing.T) { Left: &sqlparser.Offset{V: 0}, Right: &sqlparser.Offset{V: 1}, } - evalExpr, err := evalengine.Translate(expr, nil) + evalExpr, err := evalengine.Translate(expr, &evalengine.Config{ + CollationEnv: collations.MySQL8(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + }) require.NoError(t, err) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult( @@ -78,7 +81,10 @@ func TestProjectionStreaming(t *testing.T) { Left: &sqlparser.Offset{V: 0}, Right: &sqlparser.Offset{V: 1}, } - evalExpr, err := evalengine.Translate(expr, nil) + evalExpr, err := evalengine.Translate(expr, &evalengine.Config{ + CollationEnv: collations.MySQL8(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + }) require.NoError(t, err) fp := &fakePrimitive{ results: sqltypes.MakeTestStreamingResults( @@ -121,7 +127,10 @@ func TestEmptyInput(t *testing.T) { Left: &sqlparser.Offset{V: 0}, Right: &sqlparser.Offset{V: 1}, } - evalExpr, err := evalengine.Translate(expr, nil) + evalExpr, err := evalengine.Translate(expr, &evalengine.Config{ + CollationEnv: collations.MySQL8(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + }) require.NoError(t, err) fp := &fakePrimitive{ results: []*sqltypes.Result{sqltypes.MakeTestResult(sqltypes.MakeTestFields("a|b", "uint64|uint64"))}, @@ -151,7 +160,10 @@ func TestEmptyInput(t *testing.T) { } func TestHexAndBinaryArgument(t *testing.T) { - hexExpr, err := evalengine.Translate(sqlparser.NewArgument("vtg1"), nil) + hexExpr, err := 
evalengine.Translate(sqlparser.NewArgument("vtg1"), &evalengine.Config{ + CollationEnv: collations.MySQL8(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + }) require.NoError(t, err) proj := &Projection{ Cols: []string{"hex"}, @@ -183,7 +195,7 @@ func TestFields(t *testing.T) { name: `string`, bindVar: sqltypes.StringBindVariable("test"), typ: querypb.Type_VARCHAR, - collation: collations.Default(), + collation: collations.MySQL8().DefaultConnectionCharset(), }, { name: `binary`, @@ -195,7 +207,10 @@ func TestFields(t *testing.T) { for _, testCase := range testCases { t.Run(testCase.name, func(t *testing.T) { - bindExpr, err := evalengine.Translate(sqlparser.NewArgument("vtg1"), nil) + bindExpr, err := evalengine.Translate(sqlparser.NewArgument("vtg1"), &evalengine.Config{ + CollationEnv: collations.MySQL8(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + }) require.NoError(t, err) proj := &Projection{ Cols: []string{"col"}, diff --git a/go/vt/vtgate/engine/revert_migration.go b/go/vt/vtgate/engine/revert_migration.go index e7237d01da4..23275ddd043 100644 --- a/go/vt/vtgate/engine/revert_migration.go +++ b/go/vt/vtgate/engine/revert_migration.go @@ -88,7 +88,7 @@ func (v *RevertMigration) TryExecute(ctx context.Context, vcursor VCursor, bindV return nil, err } ddlStrategySetting.Strategy = schema.DDLStrategyOnline // and we keep the options as they were - onlineDDL, err := schema.NewOnlineDDL(v.GetKeyspaceName(), "", sql, ddlStrategySetting, fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()), "") + onlineDDL, err := schema.NewOnlineDDL(v.GetKeyspaceName(), "", sql, ddlStrategySetting, fmt.Sprintf("vtgate:%s", vcursor.Session().GetSessionUUID()), "", vcursor.SQLParser()) if err != nil { return result, err } diff --git a/go/vt/vtgate/engine/route_test.go b/go/vt/vtgate/engine/route_test.go index 5a89aae8d5f..45f8e66c298 100644 --- a/go/vt/vtgate/engine/route_test.go +++ b/go/vt/vtgate/engine/route_test.go @@ -1073,7 +1073,7 
@@ func TestRouteSortCollation(t *testing.T) { "dummy_select_field", ) - collationID, _ := collations.Local().LookupID("utf8mb4_hu_0900_ai_ci") + collationID, _ := collations.MySQL8().LookupID("utf8mb4_hu_0900_ai_ci") sel.OrderBy = []evalengine.OrderByParams{{ Col: 0, diff --git a/go/vt/vtgate/engine/scalar_aggregation_test.go b/go/vt/vtgate/engine/scalar_aggregation_test.go index 3329fc72d39..99031c95f34 100644 --- a/go/vt/vtgate/engine/scalar_aggregation_test.go +++ b/go/vt/vtgate/engine/scalar_aggregation_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" . "vitess.io/vitess/go/vt/vtgate/engine/opcode" @@ -273,8 +274,8 @@ func TestScalarDistinctAggrOnEngine(t *testing.T) { oa := &ScalarAggregate{ Aggregates: []*AggregateParams{ - NewAggregateParam(AggregateCountDistinct, 0, "count(distinct value)"), - NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct value)"), + NewAggregateParam(AggregateCountDistinct, 0, "count(distinct value)", collations.MySQL8()), + NewAggregateParam(AggregateSumDistinct, 1, "sum(distinct value)", collations.MySQL8()), }, Input: fp, } @@ -311,9 +312,9 @@ func TestScalarDistinctPushedDown(t *testing.T) { "8|90", )}} - countAggr := NewAggregateParam(AggregateSum, 0, "count(distinct value)") + countAggr := NewAggregateParam(AggregateSum, 0, "count(distinct value)", collations.MySQL8()) countAggr.OrigOpcode = AggregateCountDistinct - sumAggr := NewAggregateParam(AggregateSum, 1, "sum(distinct value)") + sumAggr := NewAggregateParam(AggregateSum, 1, "sum(distinct value)", collations.MySQL8()) sumAggr.OrigOpcode = AggregateSumDistinct oa := &ScalarAggregate{ Aggregates: []*AggregateParams{ diff --git a/go/vt/vtgate/engine/set.go b/go/vt/vtgate/engine/set.go index 9e9500d1ca8..601ed5ca50a 100644 --- a/go/vt/vtgate/engine/set.go +++ b/go/vt/vtgate/engine/set.go @@ -216,7 +216,6 @@ 
func (svi *SysVarIgnore) VariableName() string { // Execute implements the SetOp interface method. func (svi *SysVarIgnore) Execute(context.Context, VCursor, *evalengine.ExpressionEnv) error { - log.Infof("Ignored inapplicable SET %v = %v", svi.Name, svi.Expr) return nil } @@ -249,7 +248,7 @@ func (svci *SysVarCheckAndIgnore) Execute(ctx context.Context, vcursor VCursor, return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unexpected error, DestinationKeyspaceID mapping to multiple shards: %v", svci.TargetDestination) } checkSysVarQuery := fmt.Sprintf("select 1 from dual where @@%s = %s", svci.Name, svci.Expr) - result, err := execShard(ctx, nil, vcursor, checkSysVarQuery, env.BindVars, rss[0], false /* rollbackOnError */, false /* canAutocommit */) + _, err = execShard(ctx, nil, vcursor, checkSysVarQuery, env.BindVars, rss[0], false /* rollbackOnError */, false /* canAutocommit */) if err != nil { // Rather than returning the error, we will just log the error // as the intention for executing the query it to validate the current setting and eventually ignore it anyways. 
@@ -257,9 +256,6 @@ func (svci *SysVarCheckAndIgnore) Execute(ctx context.Context, vcursor VCursor, log.Warningf("unable to validate the current settings for '%s': %s", svci.Name, err.Error()) return nil } - if len(result.Rows) == 0 { - log.Infof("Ignored inapplicable SET %v = %v", svci.Name, svci.Expr) - } return nil } diff --git a/go/vt/vtgate/engine/set_test.go b/go/vt/vtgate/engine/set_test.go index dbce162ff87..0677ee40bd8 100644 --- a/go/vt/vtgate/engine/set_test.go +++ b/go/vt/vtgate/engine/set_test.go @@ -363,7 +363,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - changed additional - MySQL57", - mysqlVersion: "50709", + mysqlVersion: "5.7.9", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -383,7 +383,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - changed less - MySQL57", - mysqlVersion: "50709", + mysqlVersion: "5.7.9", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -420,7 +420,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - empty orig - MySQL57", - mysqlVersion: "50709", + mysqlVersion: "5.7.9", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -459,7 +459,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - empty orig - MySQL80", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -479,7 +479,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change to empty - non empty orig - MySQL80 - should use reserved conn", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -499,7 +499,7 @@ func TestSetTable(t *testing.T) { )}, }, { testName: "sql_mode change - empty orig - MySQL80 - SET_VAR disabled", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -520,7 +520,7 @@ func TestSetTable(t *testing.T) { disableSetVar: true, }, { testName: 
"sql_mode set an unsupported mode", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "sql_mode", @@ -540,7 +540,7 @@ func TestSetTable(t *testing.T) { disableSetVar: true, }, { testName: "default_week_format change - empty orig - MySQL80", - mysqlVersion: "80000", + mysqlVersion: "8.0.0", setOps: []SetOp{ &SysVarReservedConn{ Name: "default_week_format", @@ -565,23 +565,22 @@ func TestSetTable(t *testing.T) { tc.input = &SingleRow{} } - oldMySQLVersion := sqlparser.GetParserVersion() - defer func() { sqlparser.SetParserVersion(oldMySQLVersion) }() - if tc.mysqlVersion != "" { - sqlparser.SetParserVersion(tc.mysqlVersion) - } - set := &Set{ Ops: tc.setOps, Input: tc.input, } + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: tc.mysqlVersion, + }) + require.NoError(t, err) vc := &loggingVCursor{ shards: []string{"-20", "20-"}, results: tc.qr, multiShardErrs: []error{tc.execErr}, disableSetVar: tc.disableSetVar, + parser: parser, } - _, err := set.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) + _, err = set.TryExecute(context.Background(), vc, map[string]*querypb.BindVariable{}, false) if tc.expectedError == "" { require.NoError(t, err) } else { diff --git a/go/vt/vtgate/engine/update_test.go b/go/vt/vtgate/engine/update_test.go index e2ee9d553d1..9d583cdcfcf 100644 --- a/go/vt/vtgate/engine/update_test.go +++ b/go/vt/vtgate/engine/update_test.go @@ -21,6 +21,7 @@ import ( "errors" "testing" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -1023,7 +1024,7 @@ func buildTestVSchema() *vindexes.VSchema { }, }, } - vs := vindexes.BuildVSchema(invschema) + vs := vindexes.BuildVSchema(invschema, sqlparser.NewTestParser()) return vs } diff --git a/go/vt/vtgate/engine/vexplain.go b/go/vt/vtgate/engine/vexplain.go index ad540f96c9c..23b8ebc2139 100644 --- a/go/vt/vtgate/engine/vexplain.go +++ 
b/go/vt/vtgate/engine/vexplain.go @@ -122,7 +122,7 @@ func (v *VExplain) convertToVExplainAllResult(ctx context.Context, vcursor VCurs explainQuery := fmt.Sprintf("explain format = json %v", entry.Query) // We rely on the parser to see if the query we have is explainable or not // If we get an error in parsing then we can't execute explain on the given query, and we skip it - _, err := sqlparser.Parse(explainQuery) + _, err := vcursor.SQLParser().Parse(explainQuery) if err != nil { continue } diff --git a/go/vt/vtgate/evalengine/api_aggregation.go b/go/vt/vtgate/evalengine/api_aggregation.go index 05a4f8711ad..0566f477a3c 100644 --- a/go/vt/vtgate/evalengine/api_aggregation.go +++ b/go/vt/vtgate/evalengine/api_aggregation.go @@ -445,8 +445,9 @@ func NewAggregationSum(type_ sqltypes.Type) Sum { // The aggregation is performed using the slow NullSafeComparison path of the // evaluation engine. type aggregationMinMax struct { - current sqltypes.Value - collation collations.ID + current sqltypes.Value + collation collations.ID + collationEnv *collations.Environment } func (a *aggregationMinMax) minmax(value sqltypes.Value, max bool) (err error) { @@ -457,7 +458,7 @@ func (a *aggregationMinMax) minmax(value sqltypes.Value, max bool) (err error) { a.current = value return nil } - n, err := compare(a.current, value, a.collation) + n, err := compare(a.current, value, a.collationEnv, a.collation) if err != nil { return err } @@ -483,17 +484,17 @@ func (a *aggregationMinMax) Reset() { a.current = sqltypes.NULL } -func NewAggregationMinMax(type_ sqltypes.Type, collation collations.ID) MinMax { +func NewAggregationMinMax(typ sqltypes.Type, collationEnv *collations.Environment, collation collations.ID) MinMax { switch { - case sqltypes.IsSigned(type_): - return &aggregationInt{t: type_} - case sqltypes.IsUnsigned(type_): - return &aggregationUint{t: type_} - case sqltypes.IsFloat(type_): - return &aggregationFloat{t: type_} - case sqltypes.IsDecimal(type_): + case 
sqltypes.IsSigned(typ): + return &aggregationInt{t: typ} + case sqltypes.IsUnsigned(typ): + return &aggregationUint{t: typ} + case sqltypes.IsFloat(typ): + return &aggregationFloat{t: typ} + case sqltypes.IsDecimal(typ): return &aggregationDecimal{} default: - return &aggregationMinMax{collation: collation} + return &aggregationMinMax{collation: collation, collationEnv: collationEnv} } } diff --git a/go/vt/vtgate/evalengine/api_aggregation_test.go b/go/vt/vtgate/evalengine/api_aggregation_test.go index bd3a10547fe..e5dae47017e 100644 --- a/go/vt/vtgate/evalengine/api_aggregation_test.go +++ b/go/vt/vtgate/evalengine/api_aggregation_test.go @@ -137,7 +137,7 @@ func TestMinMax(t *testing.T) { for i, tcase := range tcases { t.Run(strconv.Itoa(i), func(t *testing.T) { t.Run("Min", func(t *testing.T) { - agg := NewAggregationMinMax(tcase.type_, tcase.coll) + agg := NewAggregationMinMax(tcase.type_, collations.MySQL8(), tcase.coll) for _, v := range tcase.values { err := agg.Min(v) @@ -153,7 +153,7 @@ func TestMinMax(t *testing.T) { }) t.Run("Max", func(t *testing.T) { - agg := NewAggregationMinMax(tcase.type_, tcase.coll) + agg := NewAggregationMinMax(tcase.type_, collations.MySQL8(), tcase.coll) for _, v := range tcase.values { err := agg.Max(v) diff --git a/go/vt/vtgate/evalengine/api_coerce.go b/go/vt/vtgate/evalengine/api_coerce.go index cbd1b145ca6..2730cedff07 100644 --- a/go/vt/vtgate/evalengine/api_coerce.go +++ b/go/vt/vtgate/evalengine/api_coerce.go @@ -32,7 +32,7 @@ func CoerceTo(value sqltypes.Value, typ sqltypes.Type, sqlmode SQLMode) (sqltype } // CoerceTypes takes two input types, and decides how they should be coerced before compared -func CoerceTypes(v1, v2 Type) (out Type, err error) { +func CoerceTypes(v1, v2 Type, collationEnv *collations.Environment) (out Type, err error) { if v1 == v2 { return v1, nil } @@ -47,7 +47,7 @@ func CoerceTypes(v1, v2 Type) (out Type, err error) { switch { case sqltypes.IsTextOrBinary(v1.Type()) && 
sqltypes.IsTextOrBinary(v2.Type()): - mergedCollation, _, _, ferr := mergeCollations(typedCoercionCollation(v1.Type(), v1.Collation()), typedCoercionCollation(v2.Type(), v2.Collation()), v1.Type(), v2.Type()) + mergedCollation, _, _, ferr := mergeCollations(typedCoercionCollation(v1.Type(), v1.Collation()), typedCoercionCollation(v2.Type(), v2.Collation()), v1.Type(), v2.Type(), collationEnv) if ferr != nil { return Type{}, ferr } diff --git a/go/vt/vtgate/evalengine/api_compare.go b/go/vt/vtgate/evalengine/api_compare.go index b595c71a175..c6278264a47 100644 --- a/go/vt/vtgate/evalengine/api_compare.go +++ b/go/vt/vtgate/evalengine/api_compare.go @@ -43,7 +43,7 @@ func (err UnsupportedCollationError) Error() string { // UnsupportedCollationHashError is returned when we try to get the hash value and are missing the collation to use var UnsupportedCollationHashError = vterrors.Errorf(vtrpcpb.Code_INTERNAL, "text type with an unknown/unsupported collation cannot be hashed") -func compare(v1, v2 sqltypes.Value, collationID collations.ID) (int, error) { +func compare(v1, v2 sqltypes.Value, collationEnv *collations.Environment, collationID collations.ID) (int, error) { v1t := v1.Type() // We have a fast path here for the case where both values are @@ -129,7 +129,7 @@ func compare(v1, v2 sqltypes.Value, collationID collations.ID) (int, error) { return 0, err } - out, err := evalCompare(v1eval, v2eval) + out, err := evalCompare(v1eval, v2eval, collationEnv) if err != nil { return 0, err } @@ -147,7 +147,7 @@ func compare(v1, v2 sqltypes.Value, collationID collations.ID) (int, error) { // numeric, then a numeric comparison is performed after // necessary conversions. If none are numeric, then it's // a simple binary comparison. Uncomparable values return an error. 
-func NullsafeCompare(v1, v2 sqltypes.Value, collationID collations.ID) (int, error) { +func NullsafeCompare(v1, v2 sqltypes.Value, collationEnv *collations.Environment, collationID collations.ID) (int, error) { // Based on the categorization defined for the types, // we're going to allow comparison of the following: // Null, isNumber, IsBinary. This will exclude IsQuoted @@ -161,7 +161,7 @@ func NullsafeCompare(v1, v2 sqltypes.Value, collationID collations.ID) (int, err if v2.IsNull() { return 1, nil } - return compare(v1, v2, collationID) + return compare(v1, v2, collationEnv, collationID) } // OrderByParams specifies the parameters for ordering. @@ -176,6 +176,8 @@ type ( // Type for knowing if the collation is relevant Type Type + + CollationEnv *collations.Environment } Comparison []OrderByParams @@ -199,7 +201,7 @@ func (obp *OrderByParams) String() string { } if sqltypes.IsText(obp.Type.Type()) && obp.Type.Collation() != collations.Unknown { - val += " COLLATE " + collations.Local().LookupName(obp.Type.Collation()) + val += " COLLATE " + obp.CollationEnv.LookupName(obp.Type.Collation()) } return val } @@ -211,7 +213,7 @@ func (obp *OrderByParams) Compare(r1, r2 []sqltypes.Value) int { if cmp == 0 { var err error - cmp, err = NullsafeCompare(v1, v2, obp.Type.Collation()) + cmp, err = NullsafeCompare(v1, v2, obp.CollationEnv, obp.Type.Collation()) if err != nil { _, isCollationErr := err.(UnsupportedCollationError) if !isCollationErr || obp.WeightStringCol == -1 { @@ -220,7 +222,7 @@ func (obp *OrderByParams) Compare(r1, r2 []sqltypes.Value) int { // in case of a comparison or collation error switch to using the weight string column for ordering obp.Col = obp.WeightStringCol obp.WeightStringCol = -1 - cmp, err = NullsafeCompare(r1[obp.Col], r2[obp.Col], obp.Type.Collation()) + cmp, err = NullsafeCompare(r1[obp.Col], r2[obp.Col], obp.CollationEnv, obp.Type.Collation()) if err != nil { panic(err) } diff --git a/go/vt/vtgate/evalengine/api_compare_test.go 
b/go/vt/vtgate/evalengine/api_compare_test.go index 4da234edbc4..6c74d6e4ed8 100644 --- a/go/vt/vtgate/evalengine/api_compare_test.go +++ b/go/vt/vtgate/evalengine/api_compare_test.go @@ -78,11 +78,12 @@ func (tc testCase) run(t *testing.T) { for i, value := range tc.row { fields[i] = &querypb.Field{Type: value.Type()} } - env := NewExpressionEnv(context.Background(), tc.bv, nil) + env := NewExpressionEnv(context.Background(), tc.bv, NewEmptyVCursor(collations.MySQL8(), time.UTC)) env.Row = tc.row ast := &astCompiler{ cfg: &Config{ - Collation: collations.CollationUtf8mb4ID, + Collation: collations.CollationUtf8mb4ID, + CollationEnv: collations.MySQL8(), }, } cmp, err := ast.translateComparisonExpr2(tc.op, tc.v1, tc.v2) @@ -944,13 +945,13 @@ func TestCompareStrings(t *testing.T) { tests := []testCase{ { name: "string equal string", - v1: newColumn(0, NewType(sqltypes.VarChar, collations.Default())), v2: newColumn(1, NewType(sqltypes.VarChar, collations.Default())), + v1: newColumn(0, NewType(sqltypes.VarChar, collations.MySQL8().DefaultConnectionCharset())), v2: newColumn(1, NewType(sqltypes.VarChar, collations.MySQL8().DefaultConnectionCharset())), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("toto"), sqltypes.NewVarChar("toto")}, }, { name: "string equal number", - v1: newColumn(0, NewType(sqltypes.VarChar, collations.Default())), v2: newColumn(1, NewType(sqltypes.Int64, collations.CollationBinaryID)), + v1: newColumn(0, NewType(sqltypes.VarChar, collations.MySQL8().DefaultConnectionCharset())), v2: newColumn(1, NewType(sqltypes.Int64, collations.CollationBinaryID)), out: &T, op: sqlparser.EqualOp, row: []sqltypes.Value{sqltypes.NewVarChar("1"), sqltypes.NewInt64(1)}, }, @@ -1153,7 +1154,7 @@ func TestNullsafeCompare(t *testing.T) { } for _, tcase := range tcases { t.Run(fmt.Sprintf("%v/%v", tcase.v1, tcase.v2), func(t *testing.T) { - got, err := NullsafeCompare(tcase.v1, tcase.v2, collation) + got, err := NullsafeCompare(tcase.v1, 
tcase.v2, collations.MySQL8(), collation) if tcase.err != nil { require.EqualError(t, err, tcase.err.Error()) return @@ -1242,7 +1243,7 @@ func TestNullsafeCompareCollate(t *testing.T) { } for _, tcase := range tcases { t.Run(fmt.Sprintf("%v/%v", tcase.v1, tcase.v2), func(t *testing.T) { - got, err := NullsafeCompare(TestValue(sqltypes.VarChar, tcase.v1), TestValue(sqltypes.VarChar, tcase.v2), tcase.collation) + got, err := NullsafeCompare(TestValue(sqltypes.VarChar, tcase.v1), TestValue(sqltypes.VarChar, tcase.v2), collations.MySQL8(), tcase.collation) if tcase.err == nil { require.NoError(t, err) } else { @@ -1293,7 +1294,7 @@ func BenchmarkNullSafeComparison(b *testing.B) { for i := 0; i < b.N; i++ { for _, lhs := range inputs { for _, rhs := range inputs { - _, _ = NullsafeCompare(lhs, rhs, collid) + _, _ = NullsafeCompare(lhs, rhs, collations.MySQL8(), collid) } } } @@ -1323,7 +1324,7 @@ func BenchmarkNullSafeComparison(b *testing.B) { for i := 0; i < b.N; i++ { for _, lhs := range inputs { for _, rhs := range inputs { - _, _ = NullsafeCompare(lhs, rhs, collations.CollationUtf8mb4ID) + _, _ = NullsafeCompare(lhs, rhs, collations.MySQL8(), collations.CollationUtf8mb4ID) } } } diff --git a/go/vt/vtgate/evalengine/api_hash_test.go b/go/vt/vtgate/evalengine/api_hash_test.go index c1e5d880bdd..7a680892712 100644 --- a/go/vt/vtgate/evalengine/api_hash_test.go +++ b/go/vt/vtgate/evalengine/api_hash_test.go @@ -52,7 +52,7 @@ func TestHashCodes(t *testing.T) { for _, tc := range cases { t.Run(fmt.Sprintf("%v %s %v", tc.static, equality(tc.equal).Operator(), tc.dynamic), func(t *testing.T) { - cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.CollationUtf8mb4ID) + cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.MySQL8(), collations.CollationUtf8mb4ID) require.NoError(t, err) require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 0).Operator(), tc.dynamic, equality(tc.equal)) @@ -72,12 +72,12 @@ func 
TestHashCodes(t *testing.T) { func TestHashCodesRandom(t *testing.T) { tested := 0 equal := 0 - collation := collations.Local().LookupByName("utf8mb4_general_ci") + collation := collations.MySQL8().LookupByName("utf8mb4_general_ci") endTime := time.Now().Add(1 * time.Second) for time.Now().Before(endTime) { tested++ v1, v2 := sqltypes.TestRandomValues() - cmp, err := NullsafeCompare(v1, v2, collation) + cmp, err := NullsafeCompare(v1, v2, collations.MySQL8(), collation) require.NoErrorf(t, err, "%s compared with %s", v1.String(), v2.String()) typ, err := coerceTo(v1.Type(), v2.Type()) require.NoError(t, err) @@ -137,7 +137,7 @@ func TestHashCodes128(t *testing.T) { for _, tc := range cases { t.Run(fmt.Sprintf("%v %s %v", tc.static, equality(tc.equal).Operator(), tc.dynamic), func(t *testing.T) { - cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.CollationUtf8mb4ID) + cmp, err := NullsafeCompare(tc.static, tc.dynamic, collations.MySQL8(), collations.CollationUtf8mb4ID) require.NoError(t, err) require.Equalf(t, tc.equal, cmp == 0, "got %v %s %v (expected %s)", tc.static, equality(cmp == 0).Operator(), tc.dynamic, equality(tc.equal)) @@ -161,12 +161,12 @@ func TestHashCodes128(t *testing.T) { func TestHashCodesRandom128(t *testing.T) { tested := 0 equal := 0 - collation := collations.Local().LookupByName("utf8mb4_general_ci") + collation := collations.MySQL8().LookupByName("utf8mb4_general_ci") endTime := time.Now().Add(1 * time.Second) for time.Now().Before(endTime) { tested++ v1, v2 := sqltypes.TestRandomValues() - cmp, err := NullsafeCompare(v1, v2, collation) + cmp, err := NullsafeCompare(v1, v2, collations.MySQL8(), collation) require.NoErrorf(t, err, "%s compared with %s", v1.String(), v2.String()) typ, err := coerceTo(v1.Type(), v2.Type()) require.NoError(t, err) diff --git a/go/vt/vtgate/evalengine/cached_size.go b/go/vt/vtgate/evalengine/cached_size.go index b0860f0b9e3..f43ec3c6ff8 100644 --- a/go/vt/vtgate/evalengine/cached_size.go +++ 
b/go/vt/vtgate/evalengine/cached_size.go @@ -145,10 +145,12 @@ func (cached *CollateExpr) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(32) } // field UnaryExpr vitess.io/vitess/go/vt/vtgate/evalengine.UnaryExpr size += cached.UnaryExpr.CachedSize(false) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } func (cached *Column) CachedSize(alloc bool) int64 { @@ -211,6 +213,8 @@ func (cached *ConvertExpr) CachedSize(alloc bool) int64 { size += cached.UnaryExpr.CachedSize(false) // field Type string size += hack.RuntimeAllocSize(int64(len(cached.Type))) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } func (cached *ConvertUsingExpr) CachedSize(alloc bool) int64 { @@ -219,10 +223,12 @@ func (cached *ConvertUsingExpr) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(32) } // field UnaryExpr vitess.io/vitess/go/vt/vtgate/evalengine.UnaryExpr size += cached.UnaryExpr.CachedSize(false) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } func (cached *InExpr) CachedSize(alloc bool) int64 { @@ -255,10 +261,12 @@ func (cached *IntroducerExpr) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(24) + size += int64(32) } // field UnaryExpr vitess.io/vitess/go/vt/vtgate/evalengine.UnaryExpr size += cached.UnaryExpr.CachedSize(false) + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) return size } func (cached *IsExpr) CachedSize(alloc bool) int64 { @@ -343,6 +351,18 @@ func (cached *NotExpr) CachedSize(alloc bool) int64 { size += cached.UnaryExpr.CachedSize(false) return size } +func (cached *OrderByParams) CachedSize(alloc bool) int64 { + 
if cached == nil { + return int64(0) + } + size := int64(0) + if alloc { + size += int64(48) + } + // field CollationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.CollationEnv.CachedSize(true) + return size +} func (cached *UnaryExpr) CachedSize(alloc bool) int64 { if cached == nil { return int64(0) @@ -363,12 +383,14 @@ func (cached *UntypedExpr) CachedSize(alloc bool) int64 { } size := int64(0) if alloc { - size += int64(80) + size += int64(96) } // field ir vitess.io/vitess/go/vt/vtgate/evalengine.IR if cc, ok := cached.ir.(cachedObject); ok { size += cc.CachedSize(true) } + // field collationEnv *vitess.io/vitess/go/mysql/collations.Environment + size += cached.collationEnv.CachedSize(true) // field needTypes []vitess.io/vitess/go/vt/vtgate/evalengine.typedIR { size += hack.RuntimeAllocSize(int64(cap(cached.needTypes)) * int64(16)) diff --git a/go/vt/vtgate/evalengine/casting_test.go b/go/vt/vtgate/evalengine/casting_test.go index 93c04d74539..1d75a9b24ab 100644 --- a/go/vt/vtgate/evalengine/casting_test.go +++ b/go/vt/vtgate/evalengine/casting_test.go @@ -21,6 +21,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" ) func TestEvalResultToBooleanStrict(t *testing.T) { @@ -43,7 +45,7 @@ func TestEvalResultToBooleanStrict(t *testing.T) { for _, res := range trueValues { name := evalToSQLValue(res).String() t.Run(fmt.Sprintf("ToBooleanStrict() %s expected true (success)", name), func(t *testing.T) { - result, err := (&EvalResult{res}).ToBooleanStrict() + result, err := (&EvalResult{v: res, collationEnv: collations.MySQL8()}).ToBooleanStrict() require.NoError(t, err, name) require.Equal(t, true, result, name) }) @@ -51,7 +53,7 @@ func TestEvalResultToBooleanStrict(t *testing.T) { for _, res := range falseValues { name := evalToSQLValue(res).String() t.Run(fmt.Sprintf("ToBooleanStrict() %s expected false (success)", name), func(t *testing.T) { - result, err := 
(&EvalResult{res}).ToBooleanStrict() + result, err := (&EvalResult{v: res, collationEnv: collations.MySQL8()}).ToBooleanStrict() require.NoError(t, err, name) require.Equal(t, false, result, name) }) @@ -59,7 +61,7 @@ func TestEvalResultToBooleanStrict(t *testing.T) { for _, res := range invalid { name := evalToSQLValue(res).String() t.Run(fmt.Sprintf("ToBooleanStrict() %s expected fail", name), func(t *testing.T) { - _, err := (&EvalResult{res}).ToBooleanStrict() + _, err := (&EvalResult{v: res, collationEnv: collations.MySQL8()}).ToBooleanStrict() require.Error(t, err) }) } diff --git a/go/vt/vtgate/evalengine/collation.go b/go/vt/vtgate/evalengine/collation.go index b4e589c9724..c0feca87556 100644 --- a/go/vt/vtgate/evalengine/collation.go +++ b/go/vt/vtgate/evalengine/collation.go @@ -54,7 +54,7 @@ func evalCollation(e eval) collations.TypedCollation { } } -func mergeCollations(c1, c2 collations.TypedCollation, t1, t2 sqltypes.Type) (collations.TypedCollation, colldata.Coercion, colldata.Coercion, error) { +func mergeCollations(c1, c2 collations.TypedCollation, t1, t2 sqltypes.Type, env *collations.Environment) (collations.TypedCollation, colldata.Coercion, colldata.Coercion, error) { if c1.Collation == c2.Collation { return c1, nil, nil, nil } @@ -71,18 +71,17 @@ func mergeCollations(c1, c2 collations.TypedCollation, t1, t2 sqltypes.Type) (co return collationBinary, nil, nil, nil } - env := collations.Local() return colldata.Merge(env, c1, c2, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) } -func mergeAndCoerceCollations(left, right eval) (eval, eval, collations.TypedCollation, error) { +func mergeAndCoerceCollations(left, right eval, env *collations.Environment) (eval, eval, collations.TypedCollation, error) { lt := left.SQLType() rt := right.SQLType() - mc, coerceLeft, coerceRight, err := mergeCollations(evalCollation(left), evalCollation(right), lt, rt) + mc, coerceLeft, coerceRight, err := 
mergeCollations(evalCollation(left), evalCollation(right), lt, rt, env) if err != nil { return nil, nil, collations.TypedCollation{}, err } @@ -112,7 +111,7 @@ type collationAggregation struct { cur collations.TypedCollation } -func (ca *collationAggregation) add(env *collations.Environment, tc collations.TypedCollation) error { +func (ca *collationAggregation) add(tc collations.TypedCollation, env *collations.Environment) error { if ca.cur.Collation == collations.Unknown { ca.cur = tc } else { diff --git a/go/vt/vtgate/evalengine/compare.go b/go/vt/vtgate/evalengine/compare.go index aa452c61729..102d6142321 100644 --- a/go/vt/vtgate/evalengine/compare.go +++ b/go/vt/vtgate/evalengine/compare.go @@ -19,6 +19,7 @@ package evalengine import ( "bytes" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/collations/colldata" "vitess.io/vitess/go/mysql/decimal" "vitess.io/vitess/go/mysql/json" @@ -133,8 +134,8 @@ func compareDateAndString(l, r eval) int { // More on string collations coercibility on MySQL documentation: // - https://dev.mysql.com/doc/refman/8.0/en/charset-collation-coercibility.html -func compareStrings(l, r eval) (int, error) { - l, r, col, err := mergeAndCoerceCollations(l, r) +func compareStrings(l, r eval, env *collations.Environment) (int, error) { + l, r, col, err := mergeAndCoerceCollations(l, r, env) if err != nil { return 0, err } diff --git a/go/vt/vtgate/evalengine/compiler.go b/go/vt/vtgate/evalengine/compiler.go index d757b3c3192..6df13dc4ccf 100644 --- a/go/vt/vtgate/evalengine/compiler.go +++ b/go/vt/vtgate/evalengine/compiler.go @@ -34,6 +34,7 @@ type compiler struct { dynamicTypes []ctype asm assembler sqlmode SQLMode + collationEnv *collations.Environment } type CompilerLog interface { @@ -415,7 +416,7 @@ func (c *compiler) compareNumericTypes(lt ctype, rt ctype) (swapped bool) { } func (c *compiler) compareAsStrings(lt ctype, rt ctype) error { - merged, coerceLeft, coerceRight, err := mergeCollations(lt.Col, rt.Col, 
lt.Type, rt.Type) + merged, coerceLeft, coerceRight, err := mergeCollations(lt.Col, rt.Col, lt.Type, rt.Type, c.collationEnv) if err != nil { return err } diff --git a/go/vt/vtgate/evalengine/compiler_asm.go b/go/vt/vtgate/evalengine/compiler_asm.go index cbf9df9c57e..affbee664a8 100644 --- a/go/vt/vtgate/evalengine/compiler_asm.go +++ b/go/vt/vtgate/evalengine/compiler_asm.go @@ -717,25 +717,25 @@ func (asm *assembler) CmpJSON() { }, "CMP JSON(SP-2), JSON(SP-1)") } -func (asm *assembler) CmpTuple(fullEquality bool) { +func (asm *assembler) CmpTuple(collationEnv *collations.Environment, fullEquality bool) { asm.adjustStack(-2) asm.emit(func(env *ExpressionEnv) int { l := env.vm.stack[env.vm.sp-2].(*evalTuple) r := env.vm.stack[env.vm.sp-1].(*evalTuple) env.vm.sp -= 2 - env.vm.flags.cmp, env.vm.flags.null, env.vm.err = evalCompareMany(l.t, r.t, fullEquality) + env.vm.flags.cmp, env.vm.flags.null, env.vm.err = evalCompareMany(l.t, r.t, fullEquality, collationEnv) return 1 }, "CMP TUPLE(SP-2), TUPLE(SP-1)") } -func (asm *assembler) CmpTupleNullsafe() { +func (asm *assembler) CmpTupleNullsafe(collationsEnv *collations.Environment) { asm.adjustStack(-1) asm.emit(func(env *ExpressionEnv) int { l := env.vm.stack[env.vm.sp-2].(*evalTuple) r := env.vm.stack[env.vm.sp-1].(*evalTuple) var equals int - equals, env.vm.err = evalCompareTuplesNullSafe(l.t, r.t) + equals, env.vm.err = evalCompareTuplesNullSafe(l.t, r.t, collationsEnv) env.vm.stack[env.vm.sp-2] = env.vm.arena.newEvalBool(equals == 0) env.vm.sp -= 1 @@ -2014,10 +2014,10 @@ func (asm *assembler) Fn_CONV_uc(t sqltypes.Type, col collations.TypedCollation) }, "FN CONV VARCHAR(SP-3) INT64(SP-2) INT64(SP-1)") } -func (asm *assembler) Fn_COLLATION(col collations.TypedCollation) { +func (asm *assembler) Fn_COLLATION(collationEnv *collations.Environment, col collations.TypedCollation) { asm.emit(func(env *ExpressionEnv) int { v := evalCollation(env.vm.stack[env.vm.sp-1]) - env.vm.stack[env.vm.sp-1] = 
env.vm.arena.newEvalText([]byte(collations.Local().LookupName(v.Collation)), col) + env.vm.stack[env.vm.sp-1] = env.vm.arena.newEvalText([]byte(collationEnv.LookupName(v.Collation)), col) return 1 }, "FN COLLATION (SP-1)") } @@ -2763,7 +2763,7 @@ func (asm *assembler) In_table(not bool, table map[vthash.Hash]struct{}) { } } -func (asm *assembler) In_slow(not bool) { +func (asm *assembler) In_slow(collationsEnv *collations.Environment, not bool) { asm.adjustStack(-1) if not { @@ -2772,7 +2772,7 @@ func (asm *assembler) In_slow(not bool) { rhs := env.vm.stack[env.vm.sp-1].(*evalTuple) var in boolean - in, env.vm.err = evalInExpr(lhs, rhs) + in, env.vm.err = evalInExpr(collationsEnv, lhs, rhs) env.vm.stack[env.vm.sp-2] = in.not().eval() env.vm.sp -= 1 @@ -2784,7 +2784,7 @@ func (asm *assembler) In_slow(not bool) { rhs := env.vm.stack[env.vm.sp-1].(*evalTuple) var in boolean - in, env.vm.err = evalInExpr(lhs, rhs) + in, env.vm.err = evalInExpr(collationsEnv, lhs, rhs) env.vm.stack[env.vm.sp-2] = in.eval() env.vm.sp -= 1 @@ -3836,20 +3836,6 @@ func (asm *assembler) Fn_YEARWEEK() { }, "FN YEARWEEK DATE(SP-1)") } -func (asm *assembler) Interval_i(l int) { - asm.adjustStack(-l) - asm.emit(func(env *ExpressionEnv) int { - if env.vm.stack[env.vm.sp-l] == nil { - env.vm.stack[env.vm.sp-l] = env.vm.arena.newEvalInt64(-1) - env.vm.sp -= l - return 1 - } - - env.vm.sp -= l - return 1 - }, "INTERVAL INT64(SP-1)...INT64(SP-%d)", l) -} - func (asm *assembler) Interval(l int) { asm.adjustStack(-l) asm.emit(func(env *ExpressionEnv) int { diff --git a/go/vt/vtgate/evalengine/compiler_test.go b/go/vt/vtgate/evalengine/compiler_test.go index e7b51b41748..a0c29e1510f 100644 --- a/go/vt/vtgate/evalengine/compiler_test.go +++ b/go/vt/vtgate/evalengine/compiler_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package evalengine_test import ( + "context" "fmt" "strconv" "strings" @@ -97,16 +98,16 @@ func TestCompilerReference(t *testing.T) { defer func() { evalengine.SystemTime = time.Now }() track := NewTracker() - + parser := sqlparser.NewTestParser() for _, tc := range testcases.Cases { t.Run(tc.Name(), func(t *testing.T) { var supported, total int - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(collations.MySQL8()) tc.Run(func(query string, row []sqltypes.Value) { env.Row = row - stmt, err := sqlparser.ParseExpr(query) + stmt, err := parser.ParseExpr(query) if err != nil { // no need to test un-parseable queries return @@ -117,6 +118,7 @@ func TestCompilerReference(t *testing.T) { ResolveColumn: fields.Column, ResolveType: fields.Type, Collation: collations.CollationUtf8mb4ID, + CollationEnv: collations.MySQL8(), NoConstantFolding: true, } @@ -575,10 +577,10 @@ func TestCompilerSingle(t *testing.T) { } tz, _ := time.LoadLocation("Europe/Madrid") - + parser := sqlparser.NewTestParser() for _, tc := range testCases { t.Run(tc.expression, func(t *testing.T) { - expr, err := sqlparser.ParseExpr(tc.expression) + expr, err := parser.ParseExpr(tc.expression) if err != nil { t.Fatal(err) } @@ -588,6 +590,7 @@ func TestCompilerSingle(t *testing.T) { ResolveColumn: fields.Column, ResolveType: fields.Type, Collation: collations.CollationUtf8mb4ID, + CollationEnv: collations.MySQL8(), NoConstantFolding: true, } @@ -596,7 +599,7 @@ func TestCompilerSingle(t *testing.T) { t.Fatal(err) } - env := evalengine.EmptyExpressionEnv() + env := evalengine.NewExpressionEnv(context.Background(), nil, evalengine.NewEmptyVCursor(collations.MySQL8(), tz)) env.SetTime(time.Date(2023, 10, 24, 12, 0, 0, 0, tz)) env.Row = tc.values @@ -654,9 +657,10 @@ func TestBindVarLiteral(t *testing.T) { }, } + parser := sqlparser.NewTestParser() for _, tc := range testCases { t.Run(tc.expression, func(t *testing.T) { - expr, err := sqlparser.ParseExpr(tc.expression) + expr, err 
:= parser.ParseExpr(tc.expression) if err != nil { t.Fatal(err) } @@ -668,6 +672,7 @@ func TestBindVarLiteral(t *testing.T) { ResolveColumn: fields.Column, ResolveType: fields.Type, Collation: collations.CollationUtf8mb4ID, + CollationEnv: collations.MySQL8(), NoConstantFolding: true, } @@ -678,7 +683,7 @@ func TestBindVarLiteral(t *testing.T) { result := `VARCHAR("ÿ")` - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(collations.MySQL8()) env.BindVars = map[string]*querypb.BindVariable{ "vtg1": tc.bindVar, } @@ -718,15 +723,17 @@ func TestCompilerNonConstant(t *testing.T) { }, } + parser := sqlparser.NewTestParser() for _, tc := range testCases { t.Run(tc.expression, func(t *testing.T) { - expr, err := sqlparser.ParseExpr(tc.expression) + expr, err := parser.ParseExpr(tc.expression) if err != nil { t.Fatal(err) } cfg := &evalengine.Config{ Collation: collations.CollationUtf8mb4ID, + CollationEnv: collations.MySQL8(), NoConstantFolding: true, } @@ -735,7 +742,7 @@ func TestCompilerNonConstant(t *testing.T) { t.Fatal(err) } - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(collations.MySQL8()) var prev string for i := 0; i < 1000; i++ { expected, err := env.EvaluateAST(converted) diff --git a/go/vt/vtgate/evalengine/eval_result.go b/go/vt/vtgate/evalengine/eval_result.go index 19a6ea59220..d9916af03be 100644 --- a/go/vt/vtgate/evalengine/eval_result.go +++ b/go/vt/vtgate/evalengine/eval_result.go @@ -28,7 +28,8 @@ import ( ) type EvalResult struct { - v eval + v eval + collationEnv *collations.Environment } // Value allows for retrieval of the value we expose for public consumption. 
@@ -56,7 +57,7 @@ func (er EvalResult) Collation() collations.ID { } func (er EvalResult) String() string { - return er.Value(collations.Default()).String() + return er.Value(er.collationEnv.DefaultConnectionCharset()).String() } // TupleValues allows for retrieval of the value we expose for public consumption diff --git a/go/vt/vtgate/evalengine/expr_collate.go b/go/vt/vtgate/evalengine/expr_collate.go index 47e65a0dcc7..8c0f2b2a010 100644 --- a/go/vt/vtgate/evalengine/expr_collate.go +++ b/go/vt/vtgate/evalengine/expr_collate.go @@ -63,11 +63,13 @@ type ( CollateExpr struct { UnaryExpr TypedCollation collations.TypedCollation + CollationEnv *collations.Environment } IntroducerExpr struct { UnaryExpr TypedCollation collations.TypedCollation + CollationEnv *collations.Environment } ) @@ -84,7 +86,7 @@ func (c *CollateExpr) eval(env *ExpressionEnv) (eval, error) { case nil: return nil, nil case *evalBytes: - if err := collations.Local().EnsureCollate(e.col.Collation, c.TypedCollation.Collation); err != nil { + if err := env.collationEnv.EnsureCollate(e.col.Collation, c.TypedCollation.Collation); err != nil { return nil, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, err.Error()) } b = e.withCollation(c.TypedCollation) @@ -109,7 +111,7 @@ func (expr *CollateExpr) compile(c *compiler) (ctype, error) { switch ct.Type { case sqltypes.VarChar: - if err := collations.Local().EnsureCollate(ct.Col.Collation, expr.TypedCollation.Collation); err != nil { + if err := c.collationEnv.EnsureCollate(ct.Col.Collation, expr.TypedCollation.Collation); err != nil { return ctype{}, vterrors.New(vtrpcpb.Code_INVALID_ARGUMENT, err.Error()) } fallthrough diff --git a/go/vt/vtgate/evalengine/expr_column_test.go b/go/vt/vtgate/evalengine/expr_column_test.go index b8bc5b9c640..bd7fd4250fd 100644 --- a/go/vt/vtgate/evalengine/expr_column_test.go +++ b/go/vt/vtgate/evalengine/expr_column_test.go @@ -20,6 +20,7 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/collations" 
"vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -28,8 +29,9 @@ func TestTypeOf(t *testing.T) { t.Skipf("TODO: these tests are not green") env := &ExpressionEnv{ - BindVars: make(map[string]*querypb.BindVariable), - now: time.Now(), + BindVars: make(map[string]*querypb.BindVariable), + now: time.Now(), + collationEnv: collations.MySQL8(), } c := &Column{ Type: sqltypes.Unknown, diff --git a/go/vt/vtgate/evalengine/expr_compare.go b/go/vt/vtgate/evalengine/expr_compare.go index 7acead2d99a..84f40abb9c0 100644 --- a/go/vt/vtgate/evalengine/expr_compare.go +++ b/go/vt/vtgate/evalengine/expr_compare.go @@ -52,7 +52,7 @@ type ( ComparisonOp interface { String() string - compare(left, right eval) (boolean, error) + compare(collationEnv *collations.Environment, left, right eval) (boolean, error) } compareEQ struct{} @@ -72,44 +72,44 @@ func (*ComparisonExpr) filterExpr() {} func (*InExpr) filterExpr() {} func (compareEQ) String() string { return "=" } -func (compareEQ) compare(left, right eval) (boolean, error) { - cmp, isNull, err := evalCompareAll(left, right, true) +func (compareEQ) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, isNull, err := evalCompareAll(left, right, true, collationEnv) return makeboolean2(cmp == 0, isNull), err } func (compareNE) String() string { return "!=" } -func (compareNE) compare(left, right eval) (boolean, error) { - cmp, isNull, err := evalCompareAll(left, right, true) +func (compareNE) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, isNull, err := evalCompareAll(left, right, true, collationEnv) return makeboolean2(cmp != 0, isNull), err } func (compareLT) String() string { return "<" } -func (compareLT) compare(left, right eval) (boolean, error) { - cmp, isNull, err := evalCompareAll(left, right, false) +func (compareLT) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, isNull, err 
:= evalCompareAll(left, right, false, collationEnv) return makeboolean2(cmp < 0, isNull), err } func (compareLE) String() string { return "<=" } -func (compareLE) compare(left, right eval) (boolean, error) { - cmp, isNull, err := evalCompareAll(left, right, false) +func (compareLE) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, isNull, err := evalCompareAll(left, right, false, collationEnv) return makeboolean2(cmp <= 0, isNull), err } func (compareGT) String() string { return ">" } -func (compareGT) compare(left, right eval) (boolean, error) { - cmp, isNull, err := evalCompareAll(left, right, false) +func (compareGT) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, isNull, err := evalCompareAll(left, right, false, collationEnv) return makeboolean2(cmp > 0, isNull), err } func (compareGE) String() string { return ">=" } -func (compareGE) compare(left, right eval) (boolean, error) { - cmp, isNull, err := evalCompareAll(left, right, false) +func (compareGE) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, isNull, err := evalCompareAll(left, right, false, collationEnv) return makeboolean2(cmp >= 0, isNull), err } func (compareNullSafeEQ) String() string { return "<=>" } -func (compareNullSafeEQ) compare(left, right eval) (boolean, error) { - cmp, err := evalCompareNullSafe(left, right) +func (compareNullSafeEQ) compare(collationEnv *collations.Environment, left, right eval) (boolean, error) { + cmp, err := evalCompareNullSafe(left, right, collationEnv) return makeboolean(cmp == 0), err } @@ -164,7 +164,7 @@ func compareAsJSON(l, r sqltypes.Type) bool { return l == sqltypes.TypeJSON || r == sqltypes.TypeJSON } -func evalCompareNullSafe(lVal, rVal eval) (int, error) { +func evalCompareNullSafe(lVal, rVal eval, collationEnv *collations.Environment) (int, error) { if lVal == nil { if rVal == nil { return 0, nil @@ -175,18 +175,18 @@ func 
evalCompareNullSafe(lVal, rVal eval) (int, error) { return 1, nil } if left, right, ok := compareAsTuples(lVal, rVal); ok { - return evalCompareTuplesNullSafe(left.t, right.t) + return evalCompareTuplesNullSafe(left.t, right.t, collationEnv) } - n, err := evalCompare(lVal, rVal) + n, err := evalCompare(lVal, rVal, collationEnv) return n, err } -func evalCompareMany(left, right []eval, fulleq bool) (int, bool, error) { +func evalCompareMany(left, right []eval, fulleq bool, collationEnv *collations.Environment) (int, bool, error) { // For row comparisons, (a, b) = (x, y) is equivalent to: (a = x) AND (b = y) var seenNull bool for idx, lResult := range left { rResult := right[idx] - n, isNull, err := evalCompareAll(lResult, rResult, fulleq) + n, isNull, err := evalCompareAll(lResult, rResult, fulleq, collationEnv) if err != nil { return 0, false, err } @@ -203,20 +203,20 @@ func evalCompareMany(left, right []eval, fulleq bool) (int, bool, error) { return 0, seenNull, nil } -func evalCompareAll(lVal, rVal eval, fulleq bool) (int, bool, error) { +func evalCompareAll(lVal, rVal eval, fulleq bool, collationEnv *collations.Environment) (int, bool, error) { if lVal == nil || rVal == nil { return 0, true, nil } if left, right, ok := compareAsTuples(lVal, rVal); ok { - return evalCompareMany(left.t, right.t, fulleq) + return evalCompareMany(left.t, right.t, fulleq, collationEnv) } - n, err := evalCompare(lVal, rVal) + n, err := evalCompare(lVal, rVal, collationEnv) return n, false, err } // For more details on comparison expression evaluation and type conversion: // - https://dev.mysql.com/doc/refman/8.0/en/type-conversion.html -func evalCompare(left, right eval) (comp int, err error) { +func evalCompare(left, right eval, collationEnv *collations.Environment) (comp int, err error) { lt := left.SQLType() rt := right.SQLType() @@ -224,7 +224,7 @@ func evalCompare(left, right eval) (comp int, err error) { case compareAsDates(lt, rt): return compareDates(left.(*evalTemporal), 
right.(*evalTemporal)), nil case compareAsStrings(lt, rt): - return compareStrings(left, right) + return compareStrings(left, right, collationEnv) case compareAsSameNumericType(lt, rt) || compareAsDecimal(lt, rt): return compareNumeric(left, right) case compareAsDateAndString(lt, rt): @@ -269,12 +269,12 @@ func fallbackBinary(t sqltypes.Type) bool { return false } -func evalCompareTuplesNullSafe(left, right []eval) (int, error) { +func evalCompareTuplesNullSafe(left, right []eval, collationEnv *collations.Environment) (int, error) { if len(left) != len(right) { panic("did not typecheck cardinality") } for idx, lResult := range left { - res, err := evalCompareNullSafe(lResult, right[idx]) + res, err := evalCompareNullSafe(lResult, right[idx], collationEnv) if err != nil { return 0, err } @@ -302,7 +302,7 @@ func (c *ComparisonExpr) eval(env *ExpressionEnv) (eval, error) { if _, ok := c.Op.(compareNullSafeEQ); !ok && right == nil { return nil, nil } - cmp, err := c.Op.compare(left, right) + cmp, err := c.Op.compare(env.collationEnv, left, right) if err != nil { return nil, err } @@ -312,25 +312,25 @@ func (c *ComparisonExpr) eval(env *ExpressionEnv) (eval, error) { func (expr *ComparisonExpr) compileAsTuple(c *compiler) (ctype, error) { switch expr.Op.(type) { case compareNullSafeEQ: - c.asm.CmpTupleNullsafe() + c.asm.CmpTupleNullsafe(c.collationEnv) return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean}, nil case compareEQ: - c.asm.CmpTuple(true) + c.asm.CmpTuple(c.collationEnv, true) c.asm.Cmp_eq_n() case compareNE: - c.asm.CmpTuple(true) + c.asm.CmpTuple(c.collationEnv, true) c.asm.Cmp_ne_n() case compareLT: - c.asm.CmpTuple(false) + c.asm.CmpTuple(c.collationEnv, false) c.asm.Cmp_lt_n() case compareLE: - c.asm.CmpTuple(false) + c.asm.CmpTuple(c.collationEnv, false) c.asm.Cmp_le_n() case compareGT: - c.asm.CmpTuple(false) + c.asm.CmpTuple(c.collationEnv, false) c.asm.Cmp_gt_n() case compareGE: - c.asm.CmpTuple(false) + 
c.asm.CmpTuple(c.collationEnv, false) c.asm.Cmp_ge_n() default: panic("invalid comparison operator") @@ -455,14 +455,14 @@ func (expr *ComparisonExpr) compile(c *compiler) (ctype, error) { return cmptype, nil } -func evalInExpr(lhs eval, rhs *evalTuple) (boolean, error) { +func evalInExpr(collationEnv *collations.Environment, lhs eval, rhs *evalTuple) (boolean, error) { if lhs == nil { return boolNULL, nil } var foundNull, found bool for _, rtuple := range rhs.t { - numeric, isNull, err := evalCompareAll(lhs, rtuple, true) + numeric, isNull, err := evalCompareAll(lhs, rtuple, true, collationEnv) if err != nil { return boolNULL, err } @@ -496,7 +496,7 @@ func (i *InExpr) eval(env *ExpressionEnv) (eval, error) { if !ok { return nil, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "rhs of an In operation should be a tuple") } - in, err := evalInExpr(left, rtuple) + in, err := evalInExpr(env.collationEnv, left, rtuple) if err != nil { return nil, err } @@ -553,7 +553,7 @@ func (expr *InExpr) compile(c *compiler) (ctype, error) { if err != nil { return ctype{}, err } - c.asm.In_slow(expr.Negate) + c.asm.In_slow(c.collationEnv, expr.Negate) } return ctype{Type: sqltypes.Int64, Col: collationNumeric, Flag: flagIsBoolean | (nullableFlags(lhs.Flag) | (rt.Flag & flagNullable))}, nil @@ -580,7 +580,7 @@ func (l *LikeExpr) eval(env *ExpressionEnv) (eval, error) { } var col collations.TypedCollation - left, right, col, err = mergeAndCoerceCollations(left, right) + left, right, col, err = mergeAndCoerceCollations(left, right, env.collationEnv) if err != nil { return nil, err } @@ -633,10 +633,9 @@ func (expr *LikeExpr) compile(c *compiler) (ctype, error) { var merged collations.TypedCollation var coerceLeft colldata.Coercion var coerceRight colldata.Coercion - var env = collations.Local() if lt.Col.Collation != rt.Col.Collation { - merged, coerceLeft, coerceRight, err = colldata.Merge(env, lt.Col, rt.Col, colldata.CoercionOptions{ + merged, coerceLeft, coerceRight, err = 
colldata.Merge(c.collationEnv, lt.Col, rt.Col, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) diff --git a/go/vt/vtgate/evalengine/expr_convert.go b/go/vt/vtgate/evalengine/expr_convert.go index 5b2d82b707f..f4dab09dafe 100644 --- a/go/vt/vtgate/evalengine/expr_convert.go +++ b/go/vt/vtgate/evalengine/expr_convert.go @@ -31,11 +31,13 @@ type ( Length, Scale int HasLength, HasScale bool Collation collations.ID + CollationEnv *collations.Environment } ConvertUsingExpr struct { UnaryExpr - Collation collations.ID + Collation collations.ID + CollationEnv *collations.Environment } ) diff --git a/go/vt/vtgate/evalengine/expr_env.go b/go/vt/vtgate/evalengine/expr_env.go index 1c92b0a45ee..b349cd01c04 100644 --- a/go/vt/vtgate/evalengine/expr_env.go +++ b/go/vt/vtgate/evalengine/expr_env.go @@ -21,6 +21,8 @@ import ( "strings" "time" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/datetime" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" @@ -31,6 +33,7 @@ type VCursor interface { TimeZone() *time.Location GetKeyspace() string SQLMode() string + CollationEnv() *collations.Environment } type ( @@ -44,10 +47,11 @@ type ( Fields []*querypb.Field // internal state - now time.Time - vc VCursor - user *querypb.VTGateCallerID - sqlmode SQLMode + now time.Time + vc VCursor + user *querypb.VTGateCallerID + sqlmode SQLMode + collationEnv *collations.Environment } ) @@ -88,12 +92,12 @@ func (env *ExpressionEnv) Evaluate(expr Expr) (EvalResult, error) { return env.EvaluateVM(p) } e, err := expr.eval(env) - return EvalResult{e}, err + return EvalResult{v: e, collationEnv: env.collationEnv}, err } func (env *ExpressionEnv) EvaluateAST(expr Expr) (EvalResult, error) { e, err := expr.eval(env) - return EvalResult{e}, err + return EvalResult{v: e, collationEnv: env.collationEnv}, err } func (env *ExpressionEnv) TypeOf(expr Expr) (Type, error) { @@ -113,9 +117,34 @@ func (env 
*ExpressionEnv) SetTime(now time.Time) { } } +type emptyVCursor struct { + collationEnv *collations.Environment + tz *time.Location +} + +func (e *emptyVCursor) TimeZone() *time.Location { + return e.tz +} + +func (e *emptyVCursor) GetKeyspace() string { + return "" +} + +func (e *emptyVCursor) SQLMode() string { + return config.DefaultSQLMode +} + +func (e *emptyVCursor) CollationEnv() *collations.Environment { + return e.collationEnv +} + +func NewEmptyVCursor(collationEnv *collations.Environment, tz *time.Location) VCursor { + return &emptyVCursor{collationEnv: collationEnv, tz: tz} +} + // EmptyExpressionEnv returns a new ExpressionEnv with no bind vars or row -func EmptyExpressionEnv() *ExpressionEnv { - return NewExpressionEnv(context.Background(), nil, nil) +func EmptyExpressionEnv(collationEnv *collations.Environment) *ExpressionEnv { + return NewExpressionEnv(context.Background(), nil, NewEmptyVCursor(collationEnv, time.Local)) } // NewExpressionEnv returns an expression environment with no current row, but with bindvars @@ -123,9 +152,8 @@ func NewExpressionEnv(ctx context.Context, bindVars map[string]*querypb.BindVari env := &ExpressionEnv{BindVars: bindVars, vc: vc} env.user = callerid.ImmediateCallerIDFromContext(ctx) env.SetTime(time.Now()) - if vc != nil { - env.sqlmode = ParseSQLMode(vc.SQLMode()) - } + env.sqlmode = ParseSQLMode(vc.SQLMode()) + env.collationEnv = vc.CollationEnv() return env } diff --git a/go/vt/vtgate/evalengine/expr_logical.go b/go/vt/vtgate/evalengine/expr_logical.go index 7fe836d7164..c9332e613c3 100644 --- a/go/vt/vtgate/evalengine/expr_logical.go +++ b/go/vt/vtgate/evalengine/expr_logical.go @@ -17,7 +17,6 @@ limitations under the License. 
package evalengine import ( - "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" ) @@ -586,7 +585,6 @@ func (is *IsExpr) compile(c *compiler) (ctype, error) { func (c *CaseExpr) eval(env *ExpressionEnv) (eval, error) { var ta typeAggregation var ca collationAggregation - var local = collations.Local() var result eval var matched = false @@ -606,7 +604,7 @@ func (c *CaseExpr) eval(env *ExpressionEnv) (eval, error) { return nil, err } ta.addEval(then) - if err := ca.add(local, evalCollation(then)); err != nil { + if err := ca.add(evalCollation(then), env.collationEnv); err != nil { return nil, err } @@ -621,7 +619,7 @@ func (c *CaseExpr) eval(env *ExpressionEnv) (eval, error) { return nil, err } ta.addEval(e) - if err := ca.add(local, evalCollation(e)); err != nil { + if err := ca.add(evalCollation(e), env.collationEnv); err != nil { return nil, err } if !matched { @@ -676,7 +674,6 @@ func (c *CaseExpr) simplify(env *ExpressionEnv) error { func (cs *CaseExpr) compile(c *compiler) (ctype, error) { var ca collationAggregation var ta typeAggregation - var local = collations.Local() for _, wt := range cs.cases { when, err := wt.when.compile(c) @@ -694,7 +691,7 @@ func (cs *CaseExpr) compile(c *compiler) (ctype, error) { } ta.add(then.Type, then.Flag) - if err := ca.add(local, then.Col); err != nil { + if err := ca.add(then.Col, c.collationEnv); err != nil { return ctype{}, err } } @@ -706,7 +703,7 @@ func (cs *CaseExpr) compile(c *compiler) (ctype, error) { } ta.add(els.Type, els.Flag) - if err := ca.add(local, els.Col); err != nil { + if err := ca.add(els.Col, c.collationEnv); err != nil { return ctype{}, err } } diff --git a/go/vt/vtgate/evalengine/fn_compare.go b/go/vt/vtgate/evalengine/fn_compare.go index 835d84c3d39..02c2532fc9a 100644 --- a/go/vt/vtgate/evalengine/fn_compare.go +++ b/go/vt/vtgate/evalengine/fn_compare.go @@ -32,7 +32,7 @@ type ( CallExpr } - multiComparisonFunc func(args []eval, cmp int) (eval, 
error) + multiComparisonFunc func(collationEnv *collations.Environment, args []eval, cmp int) (eval, error) builtinMultiComparison struct { CallExpr @@ -58,9 +58,8 @@ func (b *builtinCoalesce) eval(env *ExpressionEnv) (eval, error) { func (b *builtinCoalesce) compile(c *compiler) (ctype, error) { var ( - ta typeAggregation - ca collationAggregation - local = collations.Local() + ta typeAggregation + ca collationAggregation ) f := flagNullable @@ -73,7 +72,7 @@ func (b *builtinCoalesce) compile(c *compiler) (ctype, error) { f = 0 } ta.add(tt.Type, tt.Flag) - if err := ca.add(local, tt.Col); err != nil { + if err := ca.add(tt.Col, c.collationEnv); err != nil { return ctype{}, err } } @@ -115,7 +114,7 @@ func getMultiComparisonFunc(args []eval) multiComparisonFunc { for _, arg := range args { if arg == nil { - return func(args []eval, cmp int) (eval, error) { + return func(collationEnv *collations.Environment, args []eval, cmp int) (eval, error) { return nil, nil } } @@ -166,7 +165,7 @@ func getMultiComparisonFunc(args []eval) multiComparisonFunc { panic("unexpected argument type") } -func compareAllInteger_u(args []eval, cmp int) (eval, error) { +func compareAllInteger_u(_ *collations.Environment, args []eval, cmp int) (eval, error) { x := args[0].(*evalUint64) for _, arg := range args[1:] { y := arg.(*evalUint64) @@ -177,7 +176,7 @@ func compareAllInteger_u(args []eval, cmp int) (eval, error) { return x, nil } -func compareAllInteger_i(args []eval, cmp int) (eval, error) { +func compareAllInteger_i(_ *collations.Environment, args []eval, cmp int) (eval, error) { x := args[0].(*evalInt64) for _, arg := range args[1:] { y := arg.(*evalInt64) @@ -188,7 +187,7 @@ func compareAllInteger_i(args []eval, cmp int) (eval, error) { return x, nil } -func compareAllFloat(args []eval, cmp int) (eval, error) { +func compareAllFloat(_ *collations.Environment, args []eval, cmp int) (eval, error) { candidateF, ok := evalToFloat(args[0]) if !ok { return nil, errDecimalOutOfRange @@ 
-213,7 +212,7 @@ func evalDecimalPrecision(e eval) int32 { return 0 } -func compareAllDecimal(args []eval, cmp int) (eval, error) { +func compareAllDecimal(_ *collations.Environment, args []eval, cmp int) (eval, error) { decExtreme := evalToDecimal(args[0], 0, 0).dec precExtreme := evalDecimalPrecision(args[0]) @@ -230,14 +229,12 @@ func compareAllDecimal(args []eval, cmp int) (eval, error) { return newEvalDecimalWithPrec(decExtreme, precExtreme), nil } -func compareAllText(args []eval, cmp int) (eval, error) { - env := collations.Local() - +func compareAllText(collationEnv *collations.Environment, args []eval, cmp int) (eval, error) { var charsets = make([]charset.Charset, 0, len(args)) var ca collationAggregation for _, arg := range args { col := evalCollation(arg) - if err := ca.add(env, col); err != nil { + if err := ca.add(col, collationEnv); err != nil { return nil, err } charsets = append(charsets, colldata.Lookup(col.Collation).Charset()) @@ -265,7 +262,7 @@ func compareAllText(args []eval, cmp int) (eval, error) { return newEvalText(b1, tc), nil } -func compareAllBinary(args []eval, cmp int) (eval, error) { +func compareAllBinary(_ *collations.Environment, args []eval, cmp int) (eval, error) { candidateB := args[0].ToRawBytes() for _, arg := range args[1:] { @@ -283,17 +280,15 @@ func (call *builtinMultiComparison) eval(env *ExpressionEnv) (eval, error) { if err != nil { return nil, err } - return getMultiComparisonFunc(args)(args, call.cmp) + return getMultiComparisonFunc(args)(env.collationEnv, args, call.cmp) } func (call *builtinMultiComparison) compile_c(c *compiler, args []ctype) (ctype, error) { - env := collations.Local() - var ca collationAggregation var f typeFlag for _, arg := range args { f |= nullableFlags(arg.Flag) - if err := ca.add(env, arg.Col); err != nil { + if err := ca.add(arg.Col, c.collationEnv); err != nil { return ctype{}, err } } diff --git a/go/vt/vtgate/evalengine/fn_regexp.go b/go/vt/vtgate/evalengine/fn_regexp.go index 
4897ba63f6a..87923423424 100644 --- a/go/vt/vtgate/evalengine/fn_regexp.go +++ b/go/vt/vtgate/evalengine/fn_regexp.go @@ -91,7 +91,7 @@ func position(val *evalInt64, limit int64, f string) (int64, error) { return pos, nil } -func evalRegexpCollation(input, pat eval, f string) (eval, eval, collations.TypedCollation, icuregex.RegexpFlag, error) { +func evalRegexpCollation(env *collations.Environment, input, pat eval, f string) (eval, eval, collations.TypedCollation, icuregex.RegexpFlag, error) { var typedCol collations.TypedCollation var err error @@ -101,7 +101,6 @@ func evalRegexpCollation(input, pat eval, f string) (eval, eval, collations.Type patCol := patBytes.col.Collation if (inputCol == collations.CollationBinaryID && patCol != collations.CollationBinaryID) || (inputCol != collations.CollationBinaryID && patCol == collations.CollationBinaryID) { - env := collations.Local() inputColName := env.LookupName(inputCol) patColName := env.LookupName(patCol) return nil, nil, typedCol, 0, vterrors.NewErrorf(vtrpcpb.Code_INVALID_ARGUMENT, vterrors.CharacterSetMismatch, "Character set '%s' cannot be used in conjunction with '%s' in call to %s.", inputColName, patColName, f) @@ -109,13 +108,13 @@ func evalRegexpCollation(input, pat eval, f string) (eval, eval, collations.Type } } - input, pat, typedCol, err = mergeAndCoerceCollations(input, pat) + input, pat, typedCol, err = mergeAndCoerceCollations(input, pat, env) if err != nil { return nil, nil, collations.TypedCollation{}, 0, err } var flags icuregex.RegexpFlag - var collation = collations.Local().LookupName(typedCol.Collation) + collation := env.LookupName(typedCol.Collation) if strings.Contains(collation, "_ci") { flags |= icuregex.CaseInsensitive } @@ -123,11 +122,10 @@ func evalRegexpCollation(input, pat eval, f string) (eval, eval, collations.Type return input, pat, typedCol, flags, nil } -func compileRegexpCollation(input, pat ctype, f string) (collations.TypedCollation, icuregex.RegexpFlag, error) { +func 
compileRegexpCollation(env *collations.Environment, input, pat ctype, f string) (collations.TypedCollation, icuregex.RegexpFlag, error) { var merged collations.TypedCollation var err error - env := collations.Local() if input.isTextual() && pat.isTextual() { inputCol := input.Col.Collation patCol := pat.Col.Collation @@ -140,7 +138,7 @@ func compileRegexpCollation(input, pat ctype, f string) (collations.TypedCollati } if input.Col.Collation != pat.Col.Collation { - merged, _, _, err = mergeCollations(input.Col, pat.Col, input.Type, pat.Type) + merged, _, _, err = mergeCollations(input.Col, pat.Col, input.Type, pat.Type, env) } else { merged = input.Col } @@ -218,7 +216,7 @@ func compileConstantRegex(c *compiler, args TupleExpr, pat, mt int, cs collation return nil, c.unsupported(pattern) } var err error - staticEnv := EmptyExpressionEnv() + staticEnv := EmptyExpressionEnv(c.collationEnv) pattern, err = simplifyExpr(staticEnv, pattern) if err != nil { return nil, err @@ -278,7 +276,7 @@ func (r *builtinRegexpLike) eval(env *ExpressionEnv) (eval, error) { return nil, err } - input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_like") + input, pat, typedCol, flags, err := evalRegexpCollation(env.collationEnv, input, pat, "regexp_like") if err != nil { return nil, err } @@ -348,7 +346,7 @@ func (r *builtinRegexpLike) compile(c *compiler) (ctype, error) { skips = append(skips, c.compileNullCheckArg(f, 2)) } - merged, flags, err := compileRegexpCollation(input, pat, "regexp_like") + merged, flags, err := compileRegexpCollation(c.collationEnv, input, pat, "regexp_like") if err != nil { return ctype{}, err } @@ -387,7 +385,7 @@ func (r *builtinRegexpInstr) eval(env *ExpressionEnv) (eval, error) { return nil, err } - input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_instr") + input, pat, typedCol, flags, err := evalRegexpCollation(env.collationEnv, input, pat, "regexp_instr") if err != nil { return nil, err } @@ -555,7 +553,7 
@@ func (r *builtinRegexpInstr) compile(c *compiler) (ctype, error) { } } - merged, flags, err := compileRegexpCollation(input, pat, "regexp_instr") + merged, flags, err := compileRegexpCollation(c.collationEnv, input, pat, "regexp_instr") if err != nil { return ctype{}, err } @@ -594,7 +592,7 @@ func (r *builtinRegexpSubstr) eval(env *ExpressionEnv) (eval, error) { return nil, err } - input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_substr") + input, pat, typedCol, flags, err := evalRegexpCollation(env.collationEnv, input, pat, "regexp_substr") if err != nil { return nil, err } @@ -732,7 +730,7 @@ func (r *builtinRegexpSubstr) compile(c *compiler) (ctype, error) { } } - merged, flags, err := compileRegexpCollation(input, pat, "regexp_substr") + merged, flags, err := compileRegexpCollation(c.collationEnv, input, pat, "regexp_substr") if err != nil { return ctype{}, err } @@ -828,7 +826,7 @@ func (r *builtinRegexpReplace) eval(env *ExpressionEnv) (eval, error) { return nil, err } - input, pat, typedCol, flags, err := evalRegexpCollation(input, pat, "regexp_replace") + input, pat, typedCol, flags, err := evalRegexpCollation(env.collationEnv, input, pat, "regexp_replace") if err != nil { return nil, err } @@ -972,7 +970,7 @@ func (r *builtinRegexpReplace) compile(c *compiler) (ctype, error) { } } - merged, flags, err := compileRegexpCollation(input, pat, "regexp_replace") + merged, flags, err := compileRegexpCollation(c.collationEnv, input, pat, "regexp_replace") if err != nil { return ctype{}, err } diff --git a/go/vt/vtgate/evalengine/fn_string.go b/go/vt/vtgate/evalengine/fn_string.go index 97d461a44d6..fb75642b164 100644 --- a/go/vt/vtgate/evalengine/fn_string.go +++ b/go/vt/vtgate/evalengine/fn_string.go @@ -396,7 +396,7 @@ func (c *builtinCollation) eval(env *ExpressionEnv) (eval, error) { // the collation of a `COLLATION` expr is hardcoded to `utf8mb3_general_ci`, // not to the default collation of our connection. 
this is probably a bug in MySQL, but we match it - return newEvalText([]byte(collations.Local().LookupName(col.Collation)), collationUtf8mb3), nil + return newEvalText([]byte(env.collationEnv.LookupName(col.Collation)), collationUtf8mb3), nil } func (expr *builtinCollation) compile(c *compiler) (ctype, error) { @@ -407,7 +407,7 @@ func (expr *builtinCollation) compile(c *compiler) (ctype, error) { skip := c.asm.jumpFrom() - c.asm.Fn_COLLATION(collationUtf8mb3) + c.asm.Fn_COLLATION(c.collationEnv, collationUtf8mb3) c.asm.jumpDestination(skip) return ctype{Type: sqltypes.VarChar, Col: collationUtf8mb3}, nil @@ -755,7 +755,7 @@ func (l *builtinStrcmp) eval(env *ExpressionEnv) (eval, error) { col1 := evalCollation(left) col2 := evalCollation(right) - mcol, _, _, err := colldata.Merge(collations.Local(), col1, col2, colldata.CoercionOptions{ + mcol, _, _, err := colldata.Merge(env.collationEnv, col1, col2, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) @@ -795,7 +795,7 @@ func (expr *builtinStrcmp) compile(c *compiler) (ctype, error) { if sqltypes.IsNumber(lt.Type) || sqltypes.IsNumber(rt.Type) { mcol = collationNumeric } else { - mcol, _, _, err = colldata.Merge(collations.Local(), lt.Col, rt.Col, colldata.CoercionOptions{ + mcol, _, _, err = colldata.Merge(c.collationEnv, lt.Col, rt.Col, colldata.CoercionOptions{ ConvertToSuperset: true, ConvertWithCoercion: true, }) @@ -966,7 +966,6 @@ func concatConvert(buf []byte, str *evalBytes, tc collations.TypedCollation) ([] } func (call *builtinConcat) eval(env *ExpressionEnv) (eval, error) { - local := collations.Local() var ca collationAggregation tt := sqltypes.VarChar @@ -979,7 +978,7 @@ func (call *builtinConcat) eval(env *ExpressionEnv) (eval, error) { args = append(args, a) tt = concatSQLType(a.SQLType(), tt) - err = ca.add(local, evalCollation(a)) + err = ca.add(evalCollation(a), env.collationEnv) if err != nil { return nil, err } @@ -1014,7 +1013,6 @@ func (call *builtinConcat) 
eval(env *ExpressionEnv) (eval, error) { } func (call *builtinConcat) compile(c *compiler) (ctype, error) { - local := collations.Local() var ca collationAggregation tt := sqltypes.VarChar var f typeFlag @@ -1031,7 +1029,7 @@ func (call *builtinConcat) compile(c *compiler) (ctype, error) { args = append(args, a) tt = concatSQLType(a.Type, tt) - err = ca.add(local, a.Col) + err = ca.add(a.Col, c.collationEnv) if err != nil { return ctype{}, err } @@ -1073,7 +1071,6 @@ type builtinConcatWs struct { } func (call *builtinConcatWs) eval(env *ExpressionEnv) (eval, error) { - local := collations.Local() var ca collationAggregation tt := sqltypes.VarChar @@ -1093,7 +1090,7 @@ func (call *builtinConcatWs) eval(env *ExpressionEnv) (eval, error) { args = append(args, a) tt = concatSQLType(a.SQLType(), tt) - err = ca.add(local, evalCollation(a)) + err = ca.add(evalCollation(a), env.collationEnv) if err != nil { return nil, err } @@ -1143,7 +1140,6 @@ func (call *builtinConcatWs) eval(env *ExpressionEnv) (eval, error) { } func (call *builtinConcatWs) compile(c *compiler) (ctype, error) { - local := collations.Local() var ca collationAggregation tt := sqltypes.VarChar @@ -1156,7 +1152,7 @@ func (call *builtinConcatWs) compile(c *compiler) (ctype, error) { } tt = concatSQLType(a.Type, tt) - err = ca.add(local, a.Col) + err = ca.add(a.Col, c.collationEnv) if err != nil { return ctype{}, err } diff --git a/go/vt/vtgate/evalengine/format.go b/go/vt/vtgate/evalengine/format.go index db473fd418e..ee6b66b192a 100644 --- a/go/vt/vtgate/evalengine/format.go +++ b/go/vt/vtgate/evalengine/format.go @@ -206,12 +206,12 @@ func (tuple TupleExpr) format(buf *sqlparser.TrackedBuffer) { func (c *CollateExpr) format(buf *sqlparser.TrackedBuffer) { formatExpr(buf, c, c.Inner, true) buf.WriteLiteral(" COLLATE ") - buf.WriteString(collations.Local().LookupName(c.TypedCollation.Collation)) + buf.WriteString(c.CollationEnv.LookupName(c.TypedCollation.Collation)) } func (i *IntroducerExpr) format(buf 
*sqlparser.TrackedBuffer) { buf.WriteString("_") - buf.WriteString(collations.Local().LookupName(i.TypedCollation.Collation)) + buf.WriteString(i.CollationEnv.LookupName(i.TypedCollation.Collation)) formatExpr(buf, i, i.Inner, true) } @@ -294,7 +294,7 @@ func (c *ConvertExpr) format(buf *sqlparser.TrackedBuffer) { } if c.Collation != collations.Unknown { buf.WriteLiteral(" character set ") - buf.WriteString(collations.Local().LookupName(c.Collation)) + buf.WriteString(c.CollationEnv.LookupName(c.Collation)) } buf.WriteByte(')') } @@ -303,7 +303,7 @@ func (c *ConvertUsingExpr) format(buf *sqlparser.TrackedBuffer) { buf.WriteLiteral("convert(") formatExpr(buf, c, c.Inner, true) buf.WriteLiteral(" using ") - buf.WriteString(collations.Local().LookupName(c.Collation)) + buf.WriteString(c.CollationEnv.LookupName(c.Collation)) buf.WriteByte(')') } diff --git a/go/vt/vtgate/evalengine/integration/comparison_test.go b/go/vt/vtgate/evalengine/integration/comparison_test.go index 44c02b2a5a5..bda73a6d5f4 100644 --- a/go/vt/vtgate/evalengine/integration/comparison_test.go +++ b/go/vt/vtgate/evalengine/integration/comparison_test.go @@ -44,8 +44,6 @@ import ( ) var ( - collationEnv *collations.Environment - debugGolden = false debugNormalize = true debugSimplify = time.Now().UnixNano()&1 != 0 @@ -82,7 +80,7 @@ func normalizeValue(v sqltypes.Value, coll collations.ID) sqltypes.Value { return v } -func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mysql.Conn, expr string, fields []*querypb.Field, cmp *testcases.Comparison) { +func compareRemoteExprEnv(t *testing.T, collationEnv *collations.Environment, env *evalengine.ExpressionEnv, conn *mysql.Conn, expr string, fields []*querypb.Field, cmp *testcases.Comparison) { t.Helper() localQuery := "SELECT " + expr @@ -145,7 +143,7 @@ func compareRemoteExprEnv(t *testing.T, env *evalengine.ExpressionEnv, conn *mys var localVal, remoteVal sqltypes.Value var localCollation, remoteCollation collations.ID if 
localErr == nil { - v := local.Value(collations.Default()) + v := local.Value(collations.MySQL8().DefaultConnectionCharset()) if debugCheckCollations { if v.IsNull() { localCollation = collations.CollationBinaryID @@ -220,6 +218,10 @@ func (vc *vcursor) SQLMode() string { return config.DefaultSQLMode } +func (vc *vcursor) CollationEnv() *collations.Environment { + return collations.MySQL8() +} + func initTimezoneData(t *testing.T, conn *mysql.Conn) { // We load the timezone information into MySQL. The evalengine assumes // our backend MySQL is configured with the timezone information as well @@ -253,7 +255,7 @@ func TestMySQL(t *testing.T) { // We require MySQL 8.0 collations for the comparisons in the tests servenv.SetMySQLServerVersionForTest(conn.ServerVersion) - collationEnv = collations.NewEnvironment(conn.ServerVersion) + collationEnv := collations.NewEnvironment(conn.ServerVersion) servenv.OnParse(registerFlags) initTimezoneData(t, conn) @@ -265,7 +267,7 @@ func TestMySQL(t *testing.T) { env := evalengine.NewExpressionEnv(ctx, nil, &vcursor{}) tc.Run(func(query string, row []sqltypes.Value) { env.Row = row - compareRemoteExprEnv(t, env, conn, query, tc.Schema, tc.Compare) + compareRemoteExprEnv(t, collationEnv, env, conn, query, tc.Schema, tc.Compare) }) }) } diff --git a/go/vt/vtgate/evalengine/integration/fuzz_test.go b/go/vt/vtgate/evalengine/integration/fuzz_test.go index 657fbcb7c68..8e401fd19f6 100644 --- a/go/vt/vtgate/evalengine/integration/fuzz_test.go +++ b/go/vt/vtgate/evalengine/integration/fuzz_test.go @@ -19,7 +19,6 @@ limitations under the License. 
package integration import ( - "context" "encoding/json" "fmt" "math/rand" @@ -132,7 +131,7 @@ func errorsMatch(remote, local error) bool { } func evaluateLocalEvalengine(env *evalengine.ExpressionEnv, query string, fields []*querypb.Field) (evalengine.EvalResult, error) { - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { return evalengine.EvalResult{}, err } @@ -147,6 +146,7 @@ func evaluateLocalEvalengine(env *evalengine.ExpressionEnv, query string, fields cfg := &evalengine.Config{ ResolveColumn: evalengine.FieldResolver(fields).Column, Collation: collations.CollationUtf8mb4ID, + CollationEnv: collations.MySQL8(), NoConstantFolding: !debugSimplify, } expr, err = evalengine.Translate(astExpr, cfg) @@ -200,7 +200,7 @@ func TestGenerateFuzzCases(t *testing.T) { compareWithMySQL := func(expr sqlparser.Expr) *mismatch { query := "SELECT " + sqlparser.String(expr) - env := evalengine.NewExpressionEnv(context.Background(), nil, nil) + env := evalengine.EmptyExpressionEnv(collations.MySQL8()) eval, localErr := evaluateLocalEvalengine(env, query, nil) remote, remoteErr := conn.ExecuteFetch(query, 1, false) @@ -218,7 +218,7 @@ func TestGenerateFuzzCases(t *testing.T) { remoteErr: remoteErr, } if localErr == nil { - res.localVal = eval.Value(collations.Default()) + res.localVal = eval.Value(collations.MySQL8().DefaultConnectionCharset()) } if remoteErr == nil { res.remoteVal = remote.Rows[0][0] @@ -233,7 +233,7 @@ func TestGenerateFuzzCases(t *testing.T) { var start = time.Now() for len(failures) < fuzzMaxFailures { query := "SELECT " + gen.expr() - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { t.Fatal(err) } @@ -333,7 +333,7 @@ func compareResult(local, remote Result, cmp *testcases.Comparison) error { var localCollationName string var remoteCollationName string - env := collations.Local() + env := collations.MySQL8() if coll := local.Collation; coll != 
collations.Unknown { localCollationName = env.LookupName(coll) } diff --git a/go/vt/vtgate/evalengine/mysql_test.go b/go/vt/vtgate/evalengine/mysql_test.go index bfa503d82dd..eac881ba850 100644 --- a/go/vt/vtgate/evalengine/mysql_test.go +++ b/go/vt/vtgate/evalengine/mysql_test.go @@ -17,7 +17,6 @@ limitations under the License. package evalengine import ( - "context" "encoding/json" "errors" "os" @@ -63,13 +62,14 @@ func knownBadQuery(e Expr) bool { var errKnownBadQuery = errors.New("this query is known to give bad results in MySQL") func convert(t *testing.T, query string, simplify bool) (Expr, error) { - stmt, err := sqlparser.Parse(query) + stmt, err := sqlparser.NewTestParser().Parse(query) if err != nil { t.Fatalf("failed to parse '%s': %v", query, err) } cfg := &Config{ Collation: collations.CollationUtf8mb4ID, + CollationEnv: collations.MySQL8(), NoConstantFolding: !simplify, } @@ -89,7 +89,7 @@ func testSingle(t *testing.T, query string) (EvalResult, error) { if err != nil { return EvalResult{}, err } - return NewExpressionEnv(context.Background(), nil, nil).Evaluate(converted) + return EmptyExpressionEnv(collations.MySQL8()).Evaluate(converted) } func TestMySQLGolden(t *testing.T) { @@ -141,11 +141,11 @@ func TestMySQLGolden(t *testing.T) { continue } if tc.Error != "" { - t.Errorf("query %d: %s\nmysql err: %s\nvitess val: %s", testcount, tc.Query, tc.Error, eval.Value(collations.Default())) + t.Errorf("query %d: %s\nmysql err: %s\nvitess val: %s", testcount, tc.Query, tc.Error, eval.Value(collations.MySQL8().DefaultConnectionCharset())) continue } if eval.String() != tc.Value { - t.Errorf("query %d: %s\nmysql val: %s\nvitess val: %s", testcount, tc.Query, tc.Value, eval.Value(collations.Default())) + t.Errorf("query %d: %s\nmysql val: %s\nvitess val: %s", testcount, tc.Query, tc.Value, eval.Value(collations.MySQL8().DefaultConnectionCharset())) continue } ok++ @@ -159,5 +159,5 @@ func TestMySQLGolden(t *testing.T) { func TestDebug1(t *testing.T) { // 
Debug eval, err := testSingle(t, `SELECT _latin1 0xFF regexp _latin1 '[[:lower:]]' COLLATE latin1_bin`) - t.Logf("eval=%s err=%v coll=%s", eval.String(), err, collations.Local().LookupName(eval.Collation())) + t.Logf("eval=%s err=%v coll=%s", eval.String(), err, collations.MySQL8().LookupName(eval.Collation())) } diff --git a/go/vt/vtgate/evalengine/perf_test.go b/go/vt/vtgate/evalengine/perf_test.go index 10974cd313d..b1ac1536822 100644 --- a/go/vt/vtgate/evalengine/perf_test.go +++ b/go/vt/vtgate/evalengine/perf_test.go @@ -22,8 +22,9 @@ func BenchmarkCompilerExpressions(b *testing.B) { {"comparison_f", "column0 = 12", []sqltypes.Value{sqltypes.NewFloat64(420.0)}}, } + parser := sqlparser.NewTestParser() for _, tc := range testCases { - expr, err := sqlparser.ParseExpr(tc.expression) + expr, err := parser.ParseExpr(tc.expression) if err != nil { b.Fatal(err) } @@ -33,6 +34,7 @@ func BenchmarkCompilerExpressions(b *testing.B) { ResolveColumn: fields.Column, ResolveType: fields.Type, Collation: collations.CollationUtf8mb4ID, + CollationEnv: collations.MySQL8(), } translated, err := evalengine.Translate(expr, cfg) diff --git a/go/vt/vtgate/evalengine/translate.go b/go/vt/vtgate/evalengine/translate.go index ea38f116de2..917fa5e5199 100644 --- a/go/vt/vtgate/evalengine/translate.go +++ b/go/vt/vtgate/evalengine/translate.go @@ -334,7 +334,7 @@ func (ast *astCompiler) translateCollateExpr(collate *sqlparser.CollateExpr) (IR if err != nil { return nil, err } - coll := collations.Local().LookupByName(collate.Collation) + coll := ast.cfg.CollationEnv.LookupByName(collate.Collation) if coll == collations.Unknown { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unknown collation: '%s'", collate.Collation) } @@ -345,6 +345,7 @@ func (ast *astCompiler) translateCollateExpr(collate *sqlparser.CollateExpr) (IR Coercibility: collations.CoerceExplicit, Repertoire: collations.RepertoireUnicode, }, + CollationEnv: ast.cfg.CollationEnv, }, nil } @@ -358,7 +359,7 @@ 
func (ast *astCompiler) translateIntroducerExpr(introduced *sqlparser.Introducer if strings.ToLower(introduced.CharacterSet) == "_binary" { collation = collations.CollationBinaryID } else { - defaultCollation := collations.Local().DefaultCollationForCharset(introduced.CharacterSet[1:]) + defaultCollation := ast.cfg.CollationEnv.DefaultCollationForCharset(introduced.CharacterSet[1:]) if defaultCollation == collations.Unknown { panic(fmt.Sprintf("unknown character set: %s", introduced.CharacterSet)) } @@ -389,6 +390,7 @@ func (ast *astCompiler) translateIntroducerExpr(introduced *sqlparser.Introducer Coercibility: collations.CoerceExplicit, Repertoire: collations.RepertoireUnicode, }, + CollationEnv: ast.cfg.CollationEnv, }, nil default: panic("character set introducers are only supported for literals and arguments") @@ -420,7 +422,7 @@ func (ast *astCompiler) translateUnaryExpr(unary *sqlparser.UnaryExpr) (IR, erro case sqlparser.TildaOp: return &BitwiseNotExpr{UnaryExpr: UnaryExpr{expr}}, nil case sqlparser.NStringOp: - return &ConvertExpr{UnaryExpr: UnaryExpr{expr}, Type: "NCHAR", Collation: collations.CollationUtf8mb3ID}, nil + return &ConvertExpr{UnaryExpr: UnaryExpr{expr}, Type: "NCHAR", Collation: collations.CollationUtf8mb3ID, CollationEnv: ast.cfg.CollationEnv}, nil default: return nil, translateExprNotSupported(unary) } @@ -570,16 +572,10 @@ type Config struct { NoConstantFolding bool NoCompilation bool SQLMode SQLMode + CollationEnv *collations.Environment } func Translate(e sqlparser.Expr, cfg *Config) (Expr, error) { - if cfg == nil { - cfg = &Config{} - } - if cfg.Collation == collations.Unknown { - cfg.Collation = collations.Default() - } - ast := astCompiler{cfg: cfg} expr, err := ast.translateExpr(e) @@ -592,7 +588,7 @@ func Translate(e sqlparser.Expr, cfg *Config) (Expr, error) { } if !cfg.NoConstantFolding { - staticEnv := EmptyExpressionEnv() + staticEnv := EmptyExpressionEnv(cfg.CollationEnv) expr, err = simplifyExpr(staticEnv, expr) if err != 
nil { return nil, err @@ -604,14 +600,15 @@ func Translate(e sqlparser.Expr, cfg *Config) (Expr, error) { } if len(ast.untyped) == 0 && !cfg.NoCompilation { - comp := compiler{collation: cfg.Collation, sqlmode: cfg.SQLMode} + comp := compiler{collation: cfg.Collation, collationEnv: cfg.CollationEnv, sqlmode: cfg.SQLMode} return comp.compile(expr) } return &UntypedExpr{ - ir: expr, - collation: cfg.Collation, - needTypes: ast.untyped, + ir: expr, + collation: cfg.Collation, + collationEnv: cfg.CollationEnv, + needTypes: ast.untyped, }, nil } @@ -627,9 +624,14 @@ type typedExpr struct { err error } -func (typed *typedExpr) compile(expr IR, collation collations.ID, sqlmode SQLMode) (*CompiledExpr, error) { +func (typed *typedExpr) compile(expr IR, collation collations.ID, collationEnv *collations.Environment, sqlmode SQLMode) (*CompiledExpr, error) { typed.once.Do(func() { - comp := compiler{collation: collation, dynamicTypes: typed.types, sqlmode: sqlmode} + comp := compiler{ + collation: collation, + collationEnv: collationEnv, + dynamicTypes: typed.types, + sqlmode: sqlmode, + } typed.compiled, typed.err = comp.compile(expr) }) return typed.compiled, typed.err @@ -646,7 +648,8 @@ type UntypedExpr struct { // ir is the translated IR for the expression ir IR // collation is the default collation for the translated expression - collation collations.ID + collation collations.ID + collationEnv *collations.Environment // needTypes are the IR nodes in ir that could not be typed ahead of time: these must // necessarily be either Column or BindVariable nodes, as all other nodes can always // be statically typed. 
The dynamicTypeOffset field on each node is the offset of @@ -696,7 +699,7 @@ func (u *UntypedExpr) Compile(env *ExpressionEnv) (*CompiledExpr, error) { if err != nil { return nil, err } - return typed.compile(u.ir, u.collation, env.sqlmode) + return typed.compile(u.ir, u.collation, u.collationEnv, env.sqlmode) } func (u *UntypedExpr) typeof(env *ExpressionEnv) (ctype, error) { diff --git a/go/vt/vtgate/evalengine/translate_convert.go b/go/vt/vtgate/evalengine/translate_convert.go index 29216716b2b..133315e69af 100644 --- a/go/vt/vtgate/evalengine/translate_convert.go +++ b/go/vt/vtgate/evalengine/translate_convert.go @@ -32,7 +32,7 @@ func (ast *astCompiler) binaryCollationForCollation(collation collations.ID) col if binary == nil { return collations.Unknown } - return collations.Local().BinaryCollationForCharset(binary.Charset().Name()) + return ast.cfg.CollationEnv.BinaryCollationForCharset(binary.Charset().Name()) } func (ast *astCompiler) translateConvertCharset(charset string, binary bool) (collations.ID, error) { @@ -47,7 +47,7 @@ func (ast *astCompiler) translateConvertCharset(charset string, binary bool) (co return collation, nil } charset = strings.ToLower(charset) - collationID := collations.Local().DefaultCollationForCharset(charset) + collationID := ast.cfg.CollationEnv.DefaultCollationForCharset(charset) if collationID == collations.Unknown { return collations.Unknown, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Unknown character set: '%s'", charset) } @@ -66,6 +66,7 @@ func (ast *astCompiler) translateConvertExpr(expr sqlparser.Expr, convertType *s err error ) + convert.CollationEnv = ast.cfg.CollationEnv convert.Inner, err = ast.translateExpr(expr) if err != nil { return nil, err @@ -123,6 +124,7 @@ func (ast *astCompiler) translateConvertUsingExpr(expr *sqlparser.ConvertUsingEx err error ) + using.CollationEnv = ast.cfg.CollationEnv using.Inner, err = ast.translateExpr(expr.Expr) if err != nil { return nil, err diff --git 
a/go/vt/vtgate/evalengine/translate_test.go b/go/vt/vtgate/evalengine/translate_test.go index 377f34db8f2..ecf569fccea 100644 --- a/go/vt/vtgate/evalengine/translate_test.go +++ b/go/vt/vtgate/evalengine/translate_test.go @@ -20,6 +20,7 @@ import ( "context" "strings" "testing" + "time" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" @@ -114,7 +115,7 @@ func TestTranslateSimplification(t *testing.T) { for _, tc := range testCases { t.Run(tc.expression, func(t *testing.T) { - stmt, err := sqlparser.Parse("select " + tc.expression) + stmt, err := sqlparser.NewTestParser().Parse("select " + tc.expression) if err != nil { t.Fatal(err) } @@ -125,7 +126,8 @@ func TestTranslateSimplification(t *testing.T) { cfg := &Config{ ResolveColumn: fields.Column, - Collation: collations.Default(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + CollationEnv: collations.MySQL8(), NoConstantFolding: true, NoCompilation: true, } @@ -298,10 +300,13 @@ func TestEvaluate(t *testing.T) { for _, test := range tests { t.Run(test.expression, func(t *testing.T) { // Given - stmt, err := sqlparser.Parse("select " + test.expression) + stmt, err := sqlparser.NewTestParser().Parse("select " + test.expression) require.NoError(t, err) astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - sqltypesExpr, err := Translate(astExpr, &Config{Collation: collations.Default()}) + sqltypesExpr, err := Translate(astExpr, &Config{ + Collation: collations.MySQL8().DefaultConnectionCharset(), + CollationEnv: collations.MySQL8(), + }) require.Nil(t, err) require.NotNil(t, sqltypesExpr) env := NewExpressionEnv(context.Background(), map[string]*querypb.BindVariable{ @@ -311,14 +316,14 @@ func TestEvaluate(t *testing.T) { "uint32_bind_variable": sqltypes.Uint32BindVariable(21), "uint64_bind_variable": sqltypes.Uint64BindVariable(22), "float_bind_variable": sqltypes.Float64BindVariable(2.2), - }, nil) + }, NewEmptyVCursor(collations.MySQL8(), 
time.Local)) // When r, err := env.Evaluate(sqltypesExpr) // Then require.NoError(t, err) - assert.Equal(t, test.expected, r.Value(collations.Default()), "expected %s", test.expected.String()) + assert.Equal(t, test.expected, r.Value(collations.MySQL8().DefaultConnectionCharset()), "expected %s", test.expected.String()) }) } } @@ -343,15 +348,19 @@ func TestEvaluateTuple(t *testing.T) { for _, test := range tests { t.Run(test.expression, func(t *testing.T) { // Given - stmt, err := sqlparser.Parse("select " + test.expression) + stmt, err := sqlparser.NewTestParser().Parse("select " + test.expression) require.NoError(t, err) astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - sqltypesExpr, err := Translate(astExpr, &Config{Collation: collations.Default()}) + collationEnv := collations.MySQL8() + sqltypesExpr, err := Translate(astExpr, &Config{ + Collation: collationEnv.DefaultConnectionCharset(), + CollationEnv: collationEnv, + }) require.Nil(t, err) require.NotNil(t, sqltypesExpr) // When - r, err := EmptyExpressionEnv().Evaluate(sqltypesExpr) + r, err := EmptyExpressionEnv(collationEnv).Evaluate(sqltypesExpr) // Then require.NoError(t, err) @@ -380,10 +389,13 @@ func TestTranslationFailures(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.expression, func(t *testing.T) { // Given - stmt, err := sqlparser.Parse("select " + testcase.expression) + stmt, err := sqlparser.NewTestParser().Parse("select " + testcase.expression) require.NoError(t, err) astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - _, err = Translate(astExpr, &Config{Collation: collations.Default()}) + _, err = Translate(astExpr, &Config{ + Collation: collations.MySQL8().DefaultConnectionCharset(), + CollationEnv: collations.MySQL8(), + }) require.EqualError(t, err, testcase.expectedErr) }) } @@ -413,13 +425,17 @@ func TestCardinalityWithBindVariables(t *testing.T) { for _, testcase := range testcases { 
t.Run(testcase.expr, func(t *testing.T) { err := func() error { - stmt, err := sqlparser.Parse("select " + testcase.expr) + stmt, err := sqlparser.NewTestParser().Parse("select " + testcase.expr) if err != nil { return err } astExpr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr - _, err = Translate(astExpr, &Config{Collation: collations.Default(), NoCompilation: true}) + _, err = Translate(astExpr, &Config{ + Collation: collations.MySQL8().DefaultConnectionCharset(), + CollationEnv: collations.MySQL8(), + NoCompilation: true, + }) return err }() diff --git a/go/vt/vtgate/evalengine/vm.go b/go/vt/vtgate/evalengine/vm.go index df1d6aa6405..28c3af70e0e 100644 --- a/go/vt/vtgate/evalengine/vm.go +++ b/go/vt/vtgate/evalengine/vm.go @@ -87,12 +87,12 @@ func (env *ExpressionEnv) EvaluateVM(p *CompiledExpr) (EvalResult, error) { goto err } } - return EvalResult{env.vm.stack[env.vm.sp-1]}, nil + return EvalResult{v: env.vm.stack[env.vm.sp-1], collationEnv: env.collationEnv}, nil err: if env.vm.err == errDeoptimize { e, err := p.ir.eval(env) - return EvalResult{e}, err + return EvalResult{v: e, collationEnv: env.collationEnv}, err } - return EvalResult{}, env.vm.err + return EvalResult{collationEnv: env.collationEnv}, env.vm.err } diff --git a/go/vt/vtgate/evalengine/weights_test.go b/go/vt/vtgate/evalengine/weights_test.go index 7e43315f7df..9a34e6e9e81 100644 --- a/go/vt/vtgate/evalengine/weights_test.go +++ b/go/vt/vtgate/evalengine/weights_test.go @@ -77,7 +77,7 @@ func TestTinyWeightStrings(t *testing.T) { return cmp } - cmp, err := NullsafeCompare(a, b, tc.col) + cmp, err := NullsafeCompare(a, b, collations.MySQL8(), tc.col) require.NoError(t, err) fullComparisons++ @@ -88,7 +88,7 @@ func TestTinyWeightStrings(t *testing.T) { a := items[i] b := items[i+1] - cmp, err := NullsafeCompare(a, b, tc.col) + cmp, err := NullsafeCompare(a, b, collations.MySQL8(), tc.col) require.NoError(t, err) if cmp > 0 { @@ -161,7 +161,7 @@ func TestWeightStrings(t 
*testing.T) { v2, err := valueToEvalCast(b.value, typ, tc.col, 0) require.NoError(t, err) - cmp, err := evalCompareNullSafe(v1, v2) + cmp, err := evalCompareNullSafe(v1, v2, collations.MySQL8()) require.NoError(t, err) if cmp > 0 { diff --git a/go/vt/vtgate/executor.go b/go/vt/vtgate/executor.go index 40c50420586..e1ea404cfa9 100644 --- a/go/vt/vtgate/executor.go +++ b/go/vt/vtgate/executor.go @@ -122,6 +122,9 @@ type Executor struct { warmingReadsPercent int warmingReadsChannel chan bool + + collEnv *collations.Environment + parser *sqlparser.Parser } var executorOnce sync.Once @@ -152,6 +155,8 @@ func NewExecutor( noScatter bool, pv plancontext.PlannerVersion, warmingReadsPercent int, + collationEnv *collations.Environment, + parser *sqlparser.Parser, ) *Executor { e := &Executor{ serv: serv, @@ -168,6 +173,8 @@ func NewExecutor( plans: plans, warmingReadsPercent: warmingReadsPercent, warmingReadsChannel: make(chan bool, warmingReadsConcurrency), + collEnv: collationEnv, + parser: parser, } vschemaacl.Init() @@ -177,6 +184,7 @@ func NewExecutor( serv: serv, cell: cell, schema: e.schemaTracker, + parser: parser, } serv.WatchSrvVSchema(ctx, cell, e.vm.VSchemaUpdate) @@ -223,7 +231,7 @@ func (e *Executor) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConn } if result != nil && len(result.Rows) > warnMemoryRows { warnings.Add("ResultsExceeded", 1) - piiSafeSQL, err := sqlparser.RedactSQLQuery(sql) + piiSafeSQL, err := e.parser.RedactSQLQuery(sql) if err != nil { piiSafeSQL = logStats.StmtType } @@ -357,7 +365,7 @@ func (e *Executor) StreamExecute( saveSessionStats(safeSession, srr.stmtType, srr.rowsAffected, srr.insertID, srr.rowsReturned, err) if srr.rowsReturned > warnMemoryRows { warnings.Add("ResultsExceeded", 1) - piiSafeSQL, err := sqlparser.RedactSQLQuery(sql) + piiSafeSQL, err := e.parser.RedactSQLQuery(sql) if err != nil { piiSafeSQL = logStats.StmtType } @@ -499,16 +507,20 @@ func (e *Executor) addNeededBindVars(vcursor *vcursorImpl, bindVarNeeds 
*sqlpars bindVars[key] = sqltypes.StringBindVariable(mysqlSocketPath()) default: if value, hasSysVar := session.SystemVariables[sysVar]; hasSysVar { - expr, err := sqlparser.ParseExpr(value) + expr, err := e.parser.ParseExpr(value) if err != nil { return err } - evalExpr, err := evalengine.Translate(expr, nil) + evalExpr, err := evalengine.Translate(expr, &evalengine.Config{ + Collation: vcursor.collation, + CollationEnv: e.collEnv, + SQLMode: evalengine.ParseSQLMode(vcursor.SQLMode()), + }) if err != nil { return err } - evaluated, err := evalengine.EmptyExpressionEnv().Evaluate(evalExpr) + evaluated, err := evalengine.NewExpressionEnv(context.Background(), nil, vcursor).Evaluate(evalExpr) if err != nil { return err } @@ -1337,7 +1349,7 @@ func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, query, comments := sqlparser.SplitMarginComments(sql) vcursor, _ := newVCursorImpl(safeSession, comments, e, logStats, e.vm, e.VSchema(), e.resolver.resolver, e.serv, e.warnShardedOnly, e.pv) - stmt, reservedVars, err := parseAndValidateQuery(query) + stmt, reservedVars, err := parseAndValidateQuery(query, e.parser) if err != nil { return nil, err } @@ -1372,8 +1384,8 @@ func (e *Executor) handlePrepare(ctx context.Context, safeSession *SafeSession, return qr.Fields, err } -func parseAndValidateQuery(query string) (sqlparser.Statement, *sqlparser.ReservedVars, error) { - stmt, reserved, err := sqlparser.Parse2(query) +func parseAndValidateQuery(query string, parser *sqlparser.Parser) (sqlparser.Statement, *sqlparser.ReservedVars, error) { + stmt, reserved, err := parser.Parse2(query) if err != nil { return nil, nil, err } @@ -1508,7 +1520,7 @@ func (e *Executor) ReleaseLock(ctx context.Context, session *SafeSession) error // planPrepareStmt implements the IExecutor interface func (e *Executor) planPrepareStmt(ctx context.Context, vcursor *vcursorImpl, query string) (*engine.Plan, sqlparser.Statement, error) { - stmt, reservedVars, err := 
parseAndValidateQuery(query) + stmt, reservedVars, err := parseAndValidateQuery(query, e.parser) if err != nil { return nil, nil, err } @@ -1541,3 +1553,11 @@ func (e *Executor) Close() { topo.Close() e.plans.Close() } + +func (e *Executor) collationEnv() *collations.Environment { + return e.collEnv +} + +func (e *Executor) sqlparser() *sqlparser.Parser { + return e.parser +} diff --git a/go/vt/vtgate/executor_dml_test.go b/go/vt/vtgate/executor_dml_test.go index 961e6e32eca..4ef598d2e61 100644 --- a/go/vt/vtgate/executor_dml_test.go +++ b/go/vt/vtgate/executor_dml_test.go @@ -25,8 +25,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" querypb "vitess.io/vitess/go/vt/proto/query" @@ -532,7 +532,7 @@ func TestUpdateMultiOwned(t *testing.T) { } } ` - executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema) + executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema, config.DefaultMySQLVersion) sbc1.SetResults([]*sqltypes.Result{ sqltypes.MakeTestResult( @@ -1469,7 +1469,7 @@ func TestInsertShardedAutocommitLookup(t *testing.T) { } } ` - executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema) + executor, sbc1, sbc2, sbclookup, ctx := createCustomExecutor(t, vschema, config.DefaultMySQLVersion) _, err := executorExecSession(ctx, executor, "insert into user(id, v, name, music) values (1, 2, 'myname', 'star')", nil, &vtgatepb.Session{}) require.NoError(t, err) @@ -2268,7 +2268,7 @@ func TestInsertBadAutoInc(t *testing.T) { } } ` - executor, _, _, _, ctx := createCustomExecutor(t, vschema) + executor, _, _, _, ctx := createCustomExecutor(t, vschema, config.DefaultMySQLVersion) // If auto inc table cannot be found, the table should not be added to vschema. 
session := &vtgatepb.Session{ diff --git a/go/vt/vtgate/executor_framework_test.go b/go/vt/vtgate/executor_framework_test.go index 8baffdfde09..831e133770a 100644 --- a/go/vt/vtgate/executor_framework_test.go +++ b/go/vt/vtgate/executor_framework_test.go @@ -26,27 +26,25 @@ import ( "testing" "github.com/stretchr/testify/assert" - - "vitess.io/vitess/go/vt/log" - "vitess.io/vitess/go/vt/sidecardb" - "vitess.io/vitess/go/vt/vtgate/logstats" - - vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/cache/theine" - "vitess.io/vitess/go/test/utils" - "vitess.io/vitess/go/vt/vtgate/engine" - "vitess.io/vitess/go/constants/sidecar" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/key" + "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" + "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/logstats" "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/sandboxconn" ) @@ -184,7 +182,7 @@ func createExecutorEnvCallback(t testing.TB, eachShard func(shard, ks string, ta // one-off queries from thrashing the cache. Disable the doorkeeper in the tests to prevent flakiness. 
plans := theine.NewStore[PlanCacheKey, *engine.Plan](queryPlanCacheMemory, false) - executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8(), sqlparser.NewTestParser()) executor.SetQueryLogger(queryLogger) key.AnyShardPicker = DestinationAnyShardPickerFirstShard{} @@ -212,7 +210,7 @@ func createExecutorEnv(t testing.TB) (executor *Executor, sbc1, sbc2, sbclookup return } -func createCustomExecutor(t testing.TB, vschema string) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn, ctx context.Context) { +func createCustomExecutor(t testing.TB, vschema string, mysqlVersion string) (executor *Executor, sbc1, sbc2, sbclookup *sandboxconn.SandboxConn, ctx context.Context) { var cancel context.CancelFunc ctx, cancel = context.WithCancel(context.Background()) cell := "aa" @@ -231,7 +229,10 @@ func createCustomExecutor(t testing.TB, vschema string) (executor *Executor, sbc queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + parser, err := sqlparser.New(sqlparser.Options{MySQLServerVersion: mysqlVersion}) + require.NoError(t, err) + collationEnv := collations.NewEnvironment(mysqlVersion) + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collationEnv, parser) executor.SetQueryLogger(queryLogger) t.Cleanup(func() { @@ -268,7 +269,7 @@ func createCustomExecutorSetValues(t testing.TB, vschema string, values []*sqlty sbclookup = hc.AddTestTablet(cell, "0", 1, KsTestUnsharded, "0", topodatapb.TabletType_PRIMARY, true, 1, nil) queryLogger := 
streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8(), sqlparser.NewTestParser()) executor.SetQueryLogger(queryLogger) t.Cleanup(func() { @@ -293,7 +294,7 @@ func createExecutorEnvWithPrimaryReplicaConn(t testing.TB, ctx context.Context, replica = hc.AddTestTablet(cell, "0-replica", 1, KsTestUnsharded, "0", topodatapb.TabletType_REPLICA, true, 1, nil) queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) - executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, DefaultPlanCache(), nil, false, querypb.ExecuteOptions_Gen4, warmingReadsPercent) + executor = NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, DefaultPlanCache(), nil, false, querypb.ExecuteOptions_Gen4, warmingReadsPercent, collations.MySQL8(), sqlparser.NewTestParser()) executor.SetQueryLogger(queryLogger) t.Cleanup(func() { diff --git a/go/vt/vtgate/executor_select_test.go b/go/vt/vtgate/executor_select_test.go index 09467e85407..af40c0e17b3 100644 --- a/go/vt/vtgate/executor_select_test.go +++ b/go/vt/vtgate/executor_select_test.go @@ -29,11 +29,10 @@ import ( _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtgate/logstats" - "vitess.io/vitess/go/vt/sqlparser" - "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -160,18 +159,16 @@ func TestSelectDBA(t *testing.T) { } func TestSystemVariablesMySQLBelow80(t *testing.T) { - executor, sbc1, _, _, _ := createExecutorEnv(t) + executor, sbc1, _, _, _ := 
createCustomExecutor(t, "{}", "5.7.0") executor.normalize = true - - sqlparser.SetParserVersion("57000") setVarEnabled = true session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"}) sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar(""), @@ -196,10 +193,9 @@ func TestSystemVariablesMySQLBelow80(t *testing.T) { } func TestSystemVariablesWithSetVarDisabled(t *testing.T) { - executor, sbc1, _, _, _ := createExecutorEnv(t) + executor, sbc1, _, _, _ := createCustomExecutor(t, "{}", "8.0.0") executor.normalize = true - sqlparser.SetParserVersion("80000") setVarEnabled = false defer func() { setVarEnabled = true @@ -208,8 +204,8 @@ func TestSystemVariablesWithSetVarDisabled(t *testing.T) { sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar(""), @@ -234,11 +230,9 @@ func TestSystemVariablesWithSetVarDisabled(t *testing.T) { } func TestSetSystemVariablesTx(t *testing.T) { - executor, sbc1, _, _, _ := createExecutorEnv(t) + executor, sbc1, _, _, _ := createCustomExecutor(t, "{}", "8.0.1") executor.normalize = true - sqlparser.SetParserVersion("80001") - session := 
NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: "TestExecutor"}) _, err := executor.Execute(context.Background(), nil, "TestBegin", session, "begin", map[string]*querypb.BindVariable{}) @@ -250,8 +244,8 @@ func TestSetSystemVariablesTx(t *testing.T) { sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar(""), @@ -285,16 +279,14 @@ func TestSetSystemVariables(t *testing.T) { executor, _, _, lookup, _ := createExecutorEnv(t) executor.normalize = true - sqlparser.SetParserVersion("80001") - session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded, SystemVariables: map[string]string{}}) // Set @@sql_mode and execute a select statement. 
We should have SET_VAR in the select statement lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar(""), @@ -327,7 +319,7 @@ func TestSetSystemVariables(t *testing.T) { lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "sql_safe_updates", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "sql_safe_updates", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("0"), @@ -350,7 +342,7 @@ func TestSetSystemVariables(t *testing.T) { lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "max_tmp_tables", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "max_tmp_tables", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("4"), @@ -373,7 +365,7 @@ func TestSetSystemVariables(t *testing.T) { lookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "max_tmp_tables", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "max_tmp_tables", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("1"), @@ -402,8 +394,8 @@ func TestSetSystemVariablesWithReservedConnection(t *testing.T) { sbc1.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ - {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "new", Type: sqltypes.VarChar, 
Charset: uint32(collations.Default())}, + {Name: "orig", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "new", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("only_full_group_by"), @@ -614,7 +606,7 @@ func TestStreamBuffering(t *testing.T) { sbclookup.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "col", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "col", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewInt32(1), @@ -646,7 +638,7 @@ func TestStreamBuffering(t *testing.T) { wantResults := []*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "col", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "col", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, }, { Rows: [][]sqltypes.Value{{ @@ -690,7 +682,7 @@ func TestStreamLimitOffset(t *testing.T) { conn.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, {Name: "weight_string(id)", Type: sqltypes.VarBinary, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_BINARY_FLAG)}, }, Rows: returnRows[shard], @@ -719,7 +711,7 @@ func TestStreamLimitOffset(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ 
{Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ @@ -792,14 +784,14 @@ func TestSelectSystemVariables(t *testing.T) { {Name: "@@skip_query_plan_cache", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, {Name: "@@enable_system_settings", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, {Name: "@@sql_select_limit", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "@@transaction_mode", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "@@workload", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "@@read_after_write_gtid", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "@@transaction_mode", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "@@workload", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "@@read_after_write_gtid", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, {Name: "@@read_after_write_timeout", Type: sqltypes.Float64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "@@session_track_gtids", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "@@ddl_strategy", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "@@migration_context", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, - {Name: "@@socket", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: 
"@@session_track_gtids", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "@@ddl_strategy", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "@@migration_context", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, + {Name: "@@socket", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, {Name: "@@query_timeout", Type: sqltypes.Int64, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, }, Rows: [][]sqltypes.Value{{ @@ -884,7 +876,7 @@ func TestSelectUserDefinedVariable(t *testing.T) { require.NoError(t, err) wantResult = &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "@foo", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "@foo", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("bar"), @@ -1077,7 +1069,7 @@ func TestSelectDatabase(t *testing.T) { map[string]*querypb.BindVariable{}) wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ - {Name: "database()", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "database()", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{{ sqltypes.NewVarChar("TestExecutor@primary"), @@ -1563,7 +1555,7 @@ func TestStreamSelectIN(t *testing.T) { func createExecutor(ctx context.Context, serv *sandboxTopo, cell string, resolver *Resolver) *Executor { queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - ex := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + ex := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, 
collations.MySQL8(), sqlparser.NewTestParser()) ex.SetQueryLogger(queryLogger) return ex } @@ -1920,7 +1912,7 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -1954,7 +1946,7 @@ func TestSelectScatterOrderByVarChar(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "col1", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, InsertID: 0, } @@ -2052,7 +2044,7 @@ func TestStreamSelectScatterOrderByVarChar(t *testing.T) { sbc.SetResults([]*sqltypes.Result{{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, InsertID: 0, Rows: [][]sqltypes.Value{{ @@ -2080,7 +2072,7 @@ func TestStreamSelectScatterOrderByVarChar(t *testing.T) { wantResult := &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "textcol", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, 
}, } for i := 0; i < 4; i++ { @@ -3189,7 +3181,7 @@ func TestStreamOrderByLimitWithMultipleResults(t *testing.T) { } queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor := NewExecutor(ctx, serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0) + executor := NewExecutor(ctx, serv, cell, resolver, true, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8(), sqlparser.NewTestParser()) executor.SetQueryLogger(queryLogger) defer executor.Close() // some sleep for all goroutines to start @@ -4124,7 +4116,7 @@ func TestSelectCFC(t *testing.T) { func TestSelectView(t *testing.T) { executor, sbc, _, _, _ := createExecutorEnv(t) // add the view to local vschema - err := executor.vschema.AddView(KsTestSharded, "user_details_view", "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id") + err := executor.vschema.AddView(KsTestSharded, "user_details_view", "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id", executor.vm.parser) require.NoError(t, err) executor.normalize = true diff --git a/go/vt/vtgate/executor_set_test.go b/go/vt/vtgate/executor_set_test.go index 5377f72c66b..5e66899db44 100644 --- a/go/vt/vtgate/executor_set_test.go +++ b/go/vt/vtgate/executor_set_test.go @@ -21,8 +21,6 @@ import ( "testing" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/vt/sqlparser" - querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/test/utils" @@ -507,14 +505,9 @@ func createMap(keys []string, values []any) map[string]*querypb.BindVariable { } func TestSetVar(t *testing.T) { - executor, _, _, sbc, ctx := createExecutorEnv(t) + executor, _, _, sbc, ctx := createCustomExecutor(t, "{}", "8.0.0") executor.normalize = true - oldVersion := sqlparser.GetParserVersion() - sqlparser.SetParserVersion("80000") - defer func() { - 
sqlparser.SetParserVersion(oldVersion) - }() session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded}) sbc.SetResults([]*sqltypes.Result{sqltypes.MakeTestResult( @@ -551,14 +544,9 @@ func TestSetVar(t *testing.T) { } func TestSetVarShowVariables(t *testing.T) { - executor, _, _, sbc, ctx := createExecutorEnv(t) + executor, _, _, sbc, ctx := createCustomExecutor(t, "{}", "8.0.0") executor.normalize = true - oldVersion := sqlparser.GetParserVersion() - sqlparser.SetParserVersion("80000") - defer func() { - sqlparser.SetParserVersion(oldVersion) - }() session := NewAutocommitSession(&vtgatepb.Session{EnableSystemSettings: true, TargetString: KsTestUnsharded}) sbc.SetResults([]*sqltypes.Result{ diff --git a/go/vt/vtgate/executor_stream_test.go b/go/vt/vtgate/executor_stream_test.go index 5ef00fd0691..076225b158c 100644 --- a/go/vt/vtgate/executor_stream_test.go +++ b/go/vt/vtgate/executor_stream_test.go @@ -21,18 +21,18 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/streamlog" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/discovery" querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/logstats" - - "vitess.io/vitess/go/vt/discovery" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/sqltypes" _ "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/sandboxconn" ) @@ -68,7 +68,7 @@ func TestStreamSQLSharded(t *testing.T) { queryLogger := streamlog.New[*logstats.LogStats]("VTGate", queryLogBufferSize) plans := DefaultPlanCache() - executor := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, 
querypb.ExecuteOptions_Gen4, 0) + executor := NewExecutor(ctx, serv, cell, resolver, false, false, testBufferSize, plans, nil, false, querypb.ExecuteOptions_Gen4, 0, collations.MySQL8(), sqlparser.NewTestParser()) executor.SetQueryLogger(queryLogger) defer executor.Close() diff --git a/go/vt/vtgate/executor_test.go b/go/vt/vtgate/executor_test.go index 38746dea779..b0ab47ebed3 100644 --- a/go/vt/vtgate/executor_test.go +++ b/go/vt/vtgate/executor_test.go @@ -103,7 +103,7 @@ func TestExecutorMaxMemoryRowsExceeded(t *testing.T) { for _, test := range testCases { sbclookup.SetResults([]*sqltypes.Result{result}) - stmt, err := sqlparser.Parse(test.query) + stmt, err := sqlparser.NewTestParser().Parse(test.query) require.NoError(t, err) _, err = executor.Execute(ctx, nil, "TestExecutorMaxMemoryRowsExceeded", session, test.query, nil) @@ -667,7 +667,7 @@ func TestExecutorShow(t *testing.T) { append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - collations.Local().LookupName(collations.Default())), + collations.MySQL8().LookupName(collations.MySQL8().DefaultConnectionCharset())), sqltypes.NewUint32(4)), }, } @@ -712,7 +712,7 @@ func TestExecutorShow(t *testing.T) { append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - collations.Local().LookupName(collations.Default())), + collations.MySQL8().LookupName(collations.MySQL8().DefaultConnectionCharset())), sqltypes.NewUint32(4)), }, } @@ -763,7 +763,7 @@ func TestExecutorShow(t *testing.T) { wantqr = &sqltypes.Result{ Fields: []*querypb.Field{ {Name: "id", Type: sqltypes.Int32, Charset: collations.CollationBinaryID, Flags: uint32(querypb.MySqlFlag_NUM_FLAG)}, - {Name: "value", Type: sqltypes.VarChar, Charset: uint32(collations.Default())}, + {Name: "value", Type: sqltypes.VarChar, Charset: uint32(collations.MySQL8().DefaultConnectionCharset())}, }, Rows: [][]sqltypes.Value{ {sqltypes.NewInt32(1), sqltypes.NewVarChar("foo")}, @@ -1697,7 +1697,7 @@ func getPlanCached(t *testing.T, ctx context.Context, e *Executor, vcursor *vcur 
Options: &querypb.ExecuteOptions{SkipQueryPlanCache: skipQueryPlanCache}}, } - stmt, reservedVars, err := parseAndValidateQuery(sql) + stmt, reservedVars, err := parseAndValidateQuery(sql, sqlparser.NewTestParser()) require.NoError(t, err) plan, err := e.getPlan(context.Background(), vcursor, sql, stmt, comments, bindVars, reservedVars /* normalize */, e.normalize, logStats) require.NoError(t, err) @@ -1865,7 +1865,7 @@ func TestGetPlanPriority(t *testing.T) { vCursor, err := newVCursorImpl(session, makeComments(""), r, nil, r.vm, r.VSchema(), r.resolver.resolver, nil, false, pv) assert.NoError(t, err) - stmt, err := sqlparser.Parse(testCase.sql) + stmt, err := sqlparser.NewTestParser().Parse(testCase.sql) assert.NoError(t, err) crticalityFromStatement, _ := sqlparser.GetPriorityFromStatement(stmt) diff --git a/go/vt/vtgate/plan_execute.go b/go/vt/vtgate/plan_execute.go index 5d2414ac275..657a1792c26 100644 --- a/go/vt/vtgate/plan_execute.go +++ b/go/vt/vtgate/plan_execute.go @@ -80,7 +80,7 @@ func (e *Executor) newExecute( query, comments := sqlparser.SplitMarginComments(sql) // 2: Parse and Validate query - stmt, reservedVars, err := parseAndValidateQuery(query) + stmt, reservedVars, err := parseAndValidateQuery(query, e.parser) if err != nil { return err } diff --git a/go/vt/vtgate/planbuilder/builder.go b/go/vt/vtgate/planbuilder/builder.go index 4c1bfc0c547..1a982a78489 100644 --- a/go/vt/vtgate/planbuilder/builder.go +++ b/go/vt/vtgate/planbuilder/builder.go @@ -70,7 +70,7 @@ func singleTable(ks, tbl string) string { // TestBuilder builds a plan for a query based on the specified vschema. 
// This method is only used from tests func TestBuilder(query string, vschema plancontext.VSchema, keyspace string) (*engine.Plan, error) { - stmt, reserved, err := sqlparser.Parse2(query) + stmt, reserved, err := vschema.SQLParser().Parse2(query) if err != nil { return nil, err } diff --git a/go/vt/vtgate/planbuilder/collations_test.go b/go/vt/vtgate/planbuilder/collations_test.go index 01b86125921..7325d189e43 100644 --- a/go/vt/vtgate/planbuilder/collations_test.go +++ b/go/vt/vtgate/planbuilder/collations_test.go @@ -67,7 +67,7 @@ func (tc *collationTestCase) addCollationsToSchema(vschema *vschemawrapper.VSche func TestOrderedAggregateCollations(t *testing.T) { collid := func(collname string) collations.ID { - return collations.Local().LookupByName(collname) + return collations.MySQL8().LookupByName(collname) } testCases := []collationTestCase{ { diff --git a/go/vt/vtgate/planbuilder/ddl.go b/go/vt/vtgate/planbuilder/ddl.go index 41e5d64346e..fe5ebeb0889 100644 --- a/go/vt/vtgate/planbuilder/ddl.go +++ b/go/vt/vtgate/planbuilder/ddl.go @@ -2,6 +2,7 @@ package planbuilder import ( "context" + "errors" "fmt" "vitess.io/vitess/go/vt/key" @@ -172,7 +173,8 @@ func findTableDestinationAndKeyspace(vschema plancontext.VSchema, ddlStatement s var err error table, _, _, _, destination, err = vschema.FindTableOrVindex(ddlStatement.GetTable()) if err != nil { - _, isNotFound := err.(vindexes.NotFoundError) + var notFoundError vindexes.NotFoundError + isNotFound := errors.As(err, ¬FoundError) if !isNotFound { return nil, nil, err } @@ -312,7 +314,8 @@ func buildDropTable(vschema plancontext.VSchema, ddlStatement sqlparser.DDLState table, _, _, _, destinationTab, err = vschema.FindTableOrVindex(tab) if err != nil { - _, isNotFound := err.(vindexes.NotFoundError) + var notFoundError vindexes.NotFoundError + isNotFound := errors.As(err, ¬FoundError) if !isNotFound { return nil, nil, err } @@ -355,7 +358,8 @@ func buildRenameTable(vschema plancontext.VSchema, renameTable 
*sqlparser.Rename table, _, _, _, destinationFrom, err = vschema.FindTableOrVindex(tabPair.FromTable) if err != nil { - _, isNotFound := err.(vindexes.NotFoundError) + var notFoundError vindexes.NotFoundError + isNotFound := errors.As(err, ¬FoundError) if !isNotFound { return nil, nil, err } diff --git a/go/vt/vtgate/planbuilder/delete.go b/go/vt/vtgate/planbuilder/delete.go index e8b71ea9a0e..059c663465d 100644 --- a/go/vt/vtgate/planbuilder/delete.go +++ b/go/vt/vtgate/planbuilder/delete.go @@ -144,16 +144,7 @@ func checkIfDeleteSupported(del *sqlparser.Delete, semTable *semantics.SemTable) return semTable.NotUnshardedErr } - // Delete is only supported for a single TableExpr which is supposed to be an aliased expression - multiShardErr := vterrors.VT12001("multi-shard or vindex write statement") - if len(del.TableExprs) != 1 { - return multiShardErr - } - _, isAliasedExpr := del.TableExprs[0].(*sqlparser.AliasedTableExpr) - if !isAliasedExpr { - return multiShardErr - } - + // Delete is only supported for single Target. 
if len(del.Targets) > 1 { return vterrors.VT12001("multi-table DELETE statement in a sharded keyspace") } diff --git a/go/vt/vtgate/planbuilder/expression_converter.go b/go/vt/vtgate/planbuilder/expression_converter.go index 865c515ecbd..d0f6e017409 100644 --- a/go/vt/vtgate/planbuilder/expression_converter.go +++ b/go/vt/vtgate/planbuilder/expression_converter.go @@ -30,6 +30,8 @@ import ( type expressionConverter struct { tabletExpressions []sqlparser.Expr + collationEnv *collations.Environment + collation collations.ID } func booleanValues(astExpr sqlparser.Expr) evalengine.Expr { @@ -81,7 +83,10 @@ func (ec *expressionConverter) convert(astExpr sqlparser.Expr, boolean, identifi return evalExpr, nil } } - evalExpr, err := evalengine.Translate(astExpr, nil) + evalExpr, err := evalengine.Translate(astExpr, &evalengine.Config{ + Collation: ec.collation, + CollationEnv: ec.collationEnv, + }) if err != nil { if !strings.Contains(err.Error(), evalengine.ErrTranslateExprNotSupported) { return nil, err diff --git a/go/vt/vtgate/planbuilder/expression_converter_test.go b/go/vt/vtgate/planbuilder/expression_converter_test.go index 3c0c25b6003..5c65c9893b2 100644 --- a/go/vt/vtgate/planbuilder/expression_converter_test.go +++ b/go/vt/vtgate/planbuilder/expression_converter_test.go @@ -21,6 +21,7 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/evalengine" ) @@ -45,11 +46,14 @@ func TestConversion(t *testing.T) { for _, tc := range queries { t.Run(tc.expressionsIn, func(t *testing.T) { - statement, err := sqlparser.Parse("select " + tc.expressionsIn) + statement, err := sqlparser.NewTestParser().Parse("select " + tc.expressionsIn) require.NoError(t, err) slct := statement.(*sqlparser.Select) exprs := extract(slct.SelectExprs) - ec := &expressionConverter{} + ec := &expressionConverter{ + collationEnv: collations.MySQL8(), + collation: 
collations.MySQL8().DefaultConnectionCharset(), + } var result []evalengine.Expr for _, expr := range exprs { evalExpr, err := ec.convert(expr, false, false) diff --git a/go/vt/vtgate/planbuilder/operator_transformers.go b/go/vt/vtgate/planbuilder/operator_transformers.go index 1adcc61972a..65012e68e02 100644 --- a/go/vt/vtgate/planbuilder/operator_transformers.go +++ b/go/vt/vtgate/planbuilder/operator_transformers.go @@ -22,6 +22,7 @@ import ( "strconv" "strings" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/slice" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/sqlparser" @@ -254,13 +255,14 @@ func transformAggregator(ctx *plancontext.PlanningContext, op *operators.Aggrega oa := &orderedAggregate{ resultsBuilder: newResultsBuilder(plan, nil), + collationEnv: ctx.VSchema.CollationEnv(), } for _, aggr := range op.Aggregations { if aggr.OpCode == opcode.AggregateUnassigned { return nil, vterrors.VT12001(fmt.Sprintf("in scatter query: aggregation function '%s'", sqlparser.String(aggr.Original))) } - aggrParam := engine.NewAggregateParam(aggr.OpCode, aggr.ColOffset, aggr.Alias) + aggrParam := engine.NewAggregateParam(aggr.OpCode, aggr.ColOffset, aggr.Alias, ctx.VSchema.CollationEnv()) aggrParam.Expr = aggr.Func aggrParam.Original = aggr.Original aggrParam.OrigOpcode = aggr.OriginalOpCode @@ -275,6 +277,7 @@ func transformAggregator(ctx *plancontext.PlanningContext, op *operators.Aggrega WeightStringCol: groupBy.WSOffset, Expr: groupBy.SimplifiedExpr, Type: typ, + CollationEnv: ctx.VSchema.CollationEnv(), }) } @@ -315,6 +318,7 @@ func createMemorySort(ctx *plancontext.PlanningContext, src logicalPlan, orderin WeightStringCol: ordering.WOffset[idx], Desc: order.Inner.Direction == sqlparser.DescOrder, Type: typ, + CollationEnv: ctx.VSchema.CollationEnv(), }) } @@ -523,7 +527,7 @@ func transformRoutePlan(ctx *plancontext.PlanningContext, op *operators.Route) ( case *sqlparser.Update: return buildUpdateLogicalPlan(ctx, op, dmlOp, stmt, hints) case 
*sqlparser.Delete: - return buildDeleteLogicalPlan(ctx, op, dmlOp, hints) + return buildDeleteLogicalPlan(ctx, op, dmlOp, stmt, hints) case *sqlparser.Insert: return buildInsertLogicalPlan(op, dmlOp, stmt, hints) default: @@ -542,6 +546,7 @@ func buildRouteLogicalPlan(ctx *plancontext.PlanningContext, op *operators.Route WeightStringCol: order.WOffset, Desc: order.Direction == sqlparser.DescOrder, Type: typ, + CollationEnv: ctx.VSchema.CollationEnv(), }) } if err != nil { @@ -684,24 +689,20 @@ func buildUpdateLogicalPlan( return &primitiveWrapper{prim: e}, nil } -func buildDeleteLogicalPlan( - ctx *plancontext.PlanningContext, - rb *operators.Route, - dmlOp operators.Operator, - hints *queryHints, -) (logicalPlan, error) { +func buildDeleteLogicalPlan(ctx *plancontext.PlanningContext, rb *operators.Route, dmlOp operators.Operator, stmt *sqlparser.Delete, hints *queryHints) (logicalPlan, error) { del := dmlOp.(*operators.Delete) rp := newRoutingParams(ctx, rb.Routing.OpCode()) rb.Routing.UpdateRoutingParams(ctx, rp) + vtable := del.Target.VTable edml := &engine.DML{ - Query: generateQuery(del.AST), - TableNames: []string{del.VTable.Name.String()}, - Vindexes: del.VTable.Owned, + Query: generateQuery(stmt), + TableNames: []string{vtable.Name.String()}, + Vindexes: vtable.Owned, OwnedVindexQuery: del.OwnedVindexQuery, RoutingParameters: rp, } - transformDMLPlan(del.VTable, edml, rb.Routing, del.OwnedVindexQuery != "") + transformDMLPlan(vtable, edml, rb.Routing, del.OwnedVindexQuery != "") e := &engine.Delete{ DML: edml, @@ -805,19 +806,23 @@ func transformLimit(ctx *plancontext.PlanningContext, op *operators.Limit) (logi return nil, err } - return createLimit(plan, op.AST) + return createLimit(plan, op.AST, ctx.VSchema.CollationEnv()) } -func createLimit(input logicalPlan, limit *sqlparser.Limit) (logicalPlan, error) { +func createLimit(input logicalPlan, limit *sqlparser.Limit, collationEnv *collations.Environment) (logicalPlan, error) { plan := newLimit(input) - 
pv, err := evalengine.Translate(limit.Rowcount, nil) + cfg := &evalengine.Config{ + Collation: collationEnv.DefaultConnectionCharset(), + CollationEnv: collationEnv, + } + pv, err := evalengine.Translate(limit.Rowcount, cfg) if err != nil { return nil, vterrors.Wrap(err, "unexpected expression in LIMIT") } plan.elimit.Count = pv if limit.Offset != nil { - pv, err = evalengine.Translate(limit.Offset, nil) + pv, err = evalengine.Translate(limit.Offset, cfg) if err != nil { return nil, vterrors.Wrap(err, "unexpected expression in OFFSET") } @@ -862,7 +867,7 @@ func transformHashJoin(ctx *plancontext.PlanningContext, op *operators.HashJoin) fmt.Sprintf("missing type information for [%s]", strings.Join(missingTypes, ", "))) } - comparisonType, err := evalengine.CoerceTypes(ltyp, rtyp) + comparisonType, err := evalengine.CoerceTypes(ltyp, rtyp, ctx.VSchema.CollationEnv()) if err != nil { return nil, err } @@ -878,6 +883,7 @@ func transformHashJoin(ctx *plancontext.PlanningContext, op *operators.HashJoin) ASTPred: op.JoinPredicate(), Collation: comparisonType.Collation(), ComparisonType: comparisonType.Type(), + CollationEnv: ctx.VSchema.CollationEnv(), }, }, nil } diff --git a/go/vt/vtgate/planbuilder/operators/SQL_builder.go b/go/vt/vtgate/planbuilder/operators/SQL_builder.go index 961a7d252ff..1a9ef3c77c1 100644 --- a/go/vt/vtgate/planbuilder/operators/SQL_builder.go +++ b/go/vt/vtgate/planbuilder/operators/SQL_builder.go @@ -309,7 +309,7 @@ func (ts *tableSorter) Swap(i, j int) { func removeKeyspaceFromSelectExpr(expr sqlparser.SelectExpr) { switch expr := expr.(type) { case *sqlparser.AliasedExpr: - sqlparser.RemoveKeyspaceFromColName(expr.Expr) + sqlparser.RemoveKeyspace(expr.Expr) case *sqlparser.StarExpr: expr.TableName.Qualifier = sqlparser.NewIdentifierCS("") } @@ -376,7 +376,7 @@ func buildQuery(op Operator, qb *queryBuilder) { case *Update: buildUpdate(op, qb) case *Delete: - buildDML(op, qb) + buildDelete(op, qb) case *Insert: buildDML(op, qb) default: @@ 
-384,6 +384,28 @@ func buildQuery(op Operator, qb *queryBuilder) { } } +func buildDelete(op *Delete, qb *queryBuilder) { + buildQuery(op.Source, qb) + // currently the qb builds a select query underneath. + // Will take the `From` and `Where` from this select + // and create a delete statement. + // TODO: change it to directly produce `delete` statement. + sel, ok := qb.stmt.(*sqlparser.Select) + if !ok { + panic(vterrors.VT13001("expected a select here")) + } + + qb.dmlOperator = op + qb.stmt = &sqlparser.Delete{ + Ignore: sqlparser.Ignore(op.Ignore), + Targets: sqlparser.TableNames{op.Target.Name}, + TableExprs: sel.From, + Where: sel.Where, + OrderBy: op.OrderBy, + Limit: op.Limit, + } +} + func buildUpdate(op *Update, qb *queryBuilder) { tblName := sqlparser.NewTableName(op.QTable.Table.Name.String()) aTblExpr := &sqlparser.AliasedTableExpr{ diff --git a/go/vt/vtgate/planbuilder/operators/ast_to_op.go b/go/vt/vtgate/planbuilder/operators/ast_to_op.go index 63dec0c84a8..7a4758493b2 100644 --- a/go/vt/vtgate/planbuilder/operators/ast_to_op.go +++ b/go/vt/vtgate/planbuilder/operators/ast_to_op.go @@ -72,7 +72,7 @@ func addWherePredicates(ctx *plancontext.PlanningContext, expr sqlparser.Expr, o outerID := TableID(op) exprs := sqlparser.SplitAndExpression(nil, expr) for _, expr := range exprs { - sqlparser.RemoveKeyspaceFromColName(expr) + sqlparser.RemoveKeyspace(expr) subq := sqc.handleSubquery(ctx, expr, outerID) if subq != nil { continue diff --git a/go/vt/vtgate/planbuilder/operators/delete.go b/go/vt/vtgate/planbuilder/operators/delete.go index 17f6125992f..f9934306f38 100644 --- a/go/vt/vtgate/planbuilder/operators/delete.go +++ b/go/vt/vtgate/planbuilder/operators/delete.go @@ -21,43 +21,54 @@ import ( "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) type 
Delete struct { - QTable *QueryTable - VTable *vindexes.Table + Target TargetTable OwnedVindexQuery string - AST *sqlparser.Delete + OrderBy sqlparser.OrderBy + Limit *sqlparser.Limit + Ignore bool + Source Operator - noInputs noColumns noPredicates } +type TargetTable struct { + ID semantics.TableSet + VTable *vindexes.Table + Name sqlparser.TableName +} + // Introduces implements the PhysicalOperator interface func (d *Delete) introducesTableID() semantics.TableSet { - return d.QTable.ID + return d.Target.ID } // Clone implements the Operator interface -func (d *Delete) Clone([]Operator) Operator { - return &Delete{ - QTable: d.QTable, - VTable: d.VTable, - OwnedVindexQuery: d.OwnedVindexQuery, - AST: d.AST, +func (d *Delete) Clone(inputs []Operator) Operator { + newD := *d + newD.SetInputs(inputs) + return &newD +} + +func (d *Delete) Inputs() []Operator { + return []Operator{d.Source} +} + +func (d *Delete) SetInputs(inputs []Operator) { + if len(inputs) != 1 { + panic(vterrors.VT13001("unexpected number of inputs to Delete operator")) } + d.Source = inputs[0] } func (d *Delete) TablesUsed() []string { - if d.VTable != nil { - return SingleQualifiedIdentifier(d.VTable.Keyspace, d.VTable.Name) - } - return nil + return SingleQualifiedIdentifier(d.Target.VTable.Keyspace, d.Target.VTable.Name) } func (d *Delete) GetOrdering(*plancontext.PlanningContext) []OrderBy { @@ -65,23 +76,27 @@ func (d *Delete) GetOrdering(*plancontext.PlanningContext) []OrderBy { } func (d *Delete) ShortDescription() string { - return fmt.Sprintf("%s.%s %s", d.VTable.Keyspace.Name, d.VTable.Name.String(), sqlparser.String(d.AST.Where)) -} + limit := "" + orderBy := "" + if d.Limit != nil { + limit = " " + sqlparser.String(d.Limit) + } + if len(d.OrderBy) > 0 { + orderBy = " " + sqlparser.String(d.OrderBy) + } -func (d *Delete) Statement() sqlparser.Statement { - return d.AST + return fmt.Sprintf("%s.%s%s%s", d.Target.VTable.Keyspace.Name, d.Target.VTable.Name.String(), orderBy, limit) } 
-func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) Operator { - tableInfo, qt := createQueryTableForDML(ctx, deleteStmt.TableExprs[0], deleteStmt.Where) - vindexTable, routing := buildVindexTableForDML(ctx, tableInfo, qt, "delete") - +func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlparser.Delete) (op Operator) { delClone := sqlparser.CloneRefOfDelete(deleteStmt) - // Create the delete operator first. - delOp := createDeleteOperator(ctx, deleteStmt, qt, vindexTable, routing) + + delOp := createDeleteOperator(ctx, deleteStmt) + op = delOp + if deleteStmt.Comments != nil { - delOp = &LockAndComment{ - Source: delOp, + op = &LockAndComment{ + Source: op, Comments: deleteStmt.Comments, } } @@ -89,70 +104,97 @@ func createOperatorFromDelete(ctx *plancontext.PlanningContext, deleteStmt *sqlp childFks := ctx.SemTable.GetChildForeignKeysList() // If there are no foreign key constraints, then we don't need to do anything. if len(childFks) == 0 { - return delOp + return } // If the delete statement has a limit, we don't support it yet. 
- if deleteStmt.Limit != nil { + if delClone.Limit != nil { panic(vterrors.VT12001("foreign keys management at vitess with limit")) } - return createFkCascadeOpForDelete(ctx, delOp, delClone, childFks) + return createFkCascadeOpForDelete(ctx, op, delClone, childFks, delOp.Target.VTable) } -func createDeleteOperator( - ctx *plancontext.PlanningContext, - deleteStmt *sqlparser.Delete, - qt *QueryTable, - vindexTable *vindexes.Table, - routing Routing) Operator { - del := &Delete{ - QTable: qt, - VTable: vindexTable, - AST: deleteStmt, - } - route := &Route{ - Source: del, - Routing: routing, - } +func createDeleteOperator(ctx *plancontext.PlanningContext, del *sqlparser.Delete) *Delete { + op := crossJoin(ctx, del.TableExprs) - if !vindexTable.Keyspace.Sharded { - return route + if del.Where != nil { + op = addWherePredicates(ctx, del.Where.Expr, op) } - primaryVindex, vindexAndPredicates := getVindexInformation(qt.ID, vindexTable) - - tr, ok := routing.(*ShardedRouting) - if ok { - tr.VindexPreds = vindexAndPredicates + target := del.Targets[0] + tblID, exists := ctx.SemTable.Targets[target.Name] + if !exists { + panic(vterrors.VT13001("delete target table should be part of semantic analyzer")) } - - var ovq string - if len(vindexTable.Owned) > 0 { - tblExpr := &sqlparser.AliasedTableExpr{Expr: sqlparser.TableName{Name: vindexTable.Name}, As: qt.Alias.As} - ovq = generateOwnedVindexQuery(tblExpr, deleteStmt, vindexTable, primaryVindex.Columns) + tblInfo, err := ctx.SemTable.TableInfoFor(tblID) + if err != nil { + panic(err) } - del.OwnedVindexQuery = ovq + vTbl := tblInfo.GetVindexTable() + // Reference table should delete from the source table. 
+ if vTbl.Type == vindexes.TypeReference && vTbl.Source != nil { + vTbl = updateQueryGraphWithSource(ctx, op, tblID, vTbl) + } - sqc := &SubQueryBuilder{} - for _, predicate := range qt.Predicates { - subq := sqc.handleSubquery(ctx, predicate, qt.ID) - if subq != nil { - continue + var ovq string + if vTbl.Keyspace.Sharded && vTbl.Type == vindexes.TypeTable { + primaryVindex, _ := getVindexInformation(tblID, vTbl) + ate := tblInfo.GetAliasedTableExpr() + if len(vTbl.Owned) > 0 { + ovq = generateOwnedVindexQuery(ate, del, vTbl, primaryVindex.Columns) } + } - routing = UpdateRoutingLogic(ctx, predicate, routing) + name, err := tblInfo.Name() + if err != nil { + panic(err) } - if routing.OpCode() == engine.Scatter && deleteStmt.Limit != nil { - // TODO systay: we should probably check for other op code types - IN could also hit multiple shards (2022-04-07) - panic(vterrors.VT12001("multi shard DELETE with LIMIT")) + return &Delete{ + Target: TargetTable{ + ID: tblID, + VTable: vTbl, + Name: name, + }, + Source: op, + Ignore: bool(del.Ignore), + Limit: del.Limit, + OrderBy: del.OrderBy, + OwnedVindexQuery: ovq, } +} - return sqc.getRootOperator(route, nil) +func updateQueryGraphWithSource(ctx *plancontext.PlanningContext, input Operator, tblID semantics.TableSet, vTbl *vindexes.Table) *vindexes.Table { + sourceTable, _, _, _, _, err := ctx.VSchema.FindTableOrVindex(vTbl.Source.TableName) + if err != nil { + panic(err) + } + vTbl = sourceTable + TopDown(input, TableID, func(op Operator, lhsTables semantics.TableSet, isRoot bool) (Operator, *ApplyResult) { + qg, ok := op.(*QueryGraph) + if !ok { + return op, NoRewrite + } + if len(qg.Tables) > 1 { + panic(vterrors.VT12001("DELETE on reference table with join")) + } + for _, tbl := range qg.Tables { + if tbl.ID != tblID { + continue + } + tbl.Alias = sqlparser.NewAliasedTableExpr(sqlparser.NewTableName(vTbl.Name.String()), tbl.Alias.As.String()) + tbl.Table, _ = tbl.Alias.TableName() + } + return op, Rewrote("change query 
table point to source table") + }, func(operator Operator) VisitRule { + _, ok := operator.(*QueryGraph) + return VisitRule(ok) + }) + return vTbl } -func createFkCascadeOpForDelete(ctx *plancontext.PlanningContext, parentOp Operator, delStmt *sqlparser.Delete, childFks []vindexes.ChildFKInfo) Operator { +func createFkCascadeOpForDelete(ctx *plancontext.PlanningContext, parentOp Operator, delStmt *sqlparser.Delete, childFks []vindexes.ChildFKInfo, deletedTbl *vindexes.Table) Operator { var fkChildren []*FkChild var selectExprs []sqlparser.SelectExpr for _, fk := range childFks { @@ -169,7 +211,7 @@ func createFkCascadeOpForDelete(ctx *plancontext.PlanningContext, parentOp Opera fkChildren = append(fkChildren, createFkChildForDelete(ctx, fk, offsets)) } - selectionOp := createSelectionOp(ctx, selectExprs, delStmt.TableExprs, delStmt.Where, nil, nil, sqlparser.ForUpdateLockNoWait) + selectionOp := createSelectionOp(ctx, selectExprs, delStmt.TableExprs, delStmt.Where, nil, nil, getUpdateLock(deletedTbl)) return &FkCascade{ Selection: selectionOp, diff --git a/go/vt/vtgate/planbuilder/operators/distinct.go b/go/vt/vtgate/planbuilder/operators/distinct.go index 1750846a961..655bf2350cc 100644 --- a/go/vt/vtgate/planbuilder/operators/distinct.go +++ b/go/vt/vtgate/planbuilder/operators/distinct.go @@ -62,9 +62,10 @@ func (d *Distinct) planOffsets(ctx *plancontext.PlanningContext) Operator { } d.Columns = append(d.Columns, engine.CheckCol{ - Col: idx, - WsCol: wsCol, - Type: typ, + Col: idx, + WsCol: wsCol, + Type: typ, + CollationEnv: ctx.VSchema.CollationEnv(), }) } return nil diff --git a/go/vt/vtgate/planbuilder/operators/dml_planning.go b/go/vt/vtgate/planbuilder/operators/dml_planning.go index 3140142858c..b2e59f7b6be 100644 --- a/go/vt/vtgate/planbuilder/operators/dml_planning.go +++ b/go/vt/vtgate/planbuilder/operators/dml_planning.go @@ -80,8 +80,9 @@ func buildChangedVindexesValues( } found = true pv, err := evalengine.Translate(assignment.Expr.EvalExpr, 
&evalengine.Config{ - ResolveType: ctx.SemTable.TypeForExpr, - Collation: ctx.SemTable.Collation, + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + CollationEnv: ctx.VSchema.CollationEnv(), }) if err != nil { panic(invalidUpdateExpr(assignment.Name.Name.String(), assignment.Expr.EvalExpr)) diff --git a/go/vt/vtgate/planbuilder/operators/filter.go b/go/vt/vtgate/planbuilder/operators/filter.go index f2171c43a1b..5c4c33f4575 100644 --- a/go/vt/vtgate/planbuilder/operators/filter.go +++ b/go/vt/vtgate/planbuilder/operators/filter.go @@ -119,8 +119,9 @@ func (f *Filter) Compact(*plancontext.PlanningContext) (Operator, *ApplyResult) func (f *Filter) planOffsets(ctx *plancontext.PlanningContext) Operator { cfg := &evalengine.Config{ - ResolveType: ctx.SemTable.TypeForExpr, - Collation: ctx.SemTable.Collation, + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + CollationEnv: ctx.VSchema.CollationEnv(), } predicate := sqlparser.AndExpressions(f.Predicates...) 
diff --git a/go/vt/vtgate/planbuilder/operators/fuzz.go b/go/vt/vtgate/planbuilder/operators/fuzz.go index 6ee6b0bab83..c92810e3ae8 100644 --- a/go/vt/vtgate/planbuilder/operators/fuzz.go +++ b/go/vt/vtgate/planbuilder/operators/fuzz.go @@ -30,7 +30,7 @@ func FuzzAnalyse(data []byte) int { if err != nil { return 0 } - tree, err := sqlparser.Parse(query) + tree, err := sqlparser.NewTestParser().Parse(query) if err != nil { return -1 } diff --git a/go/vt/vtgate/planbuilder/operators/hash_join.go b/go/vt/vtgate/planbuilder/operators/hash_join.go index fed0633fbe3..56fe3f61fc8 100644 --- a/go/vt/vtgate/planbuilder/operators/hash_join.go +++ b/go/vt/vtgate/planbuilder/operators/hash_join.go @@ -332,8 +332,9 @@ func (hj *HashJoin) addColumn(ctx *plancontext.PlanningContext, in sqlparser.Exp rewrittenExpr := sqlparser.CopyOnRewrite(in, pre, r.post, ctx.SemTable.CopySemanticInfo).(sqlparser.Expr) cfg := &evalengine.Config{ - ResolveType: ctx.SemTable.TypeForExpr, - Collation: ctx.SemTable.Collation, + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + CollationEnv: ctx.VSchema.CollationEnv(), } eexpr, err := evalengine.Translate(rewrittenExpr, cfg) if err != nil { diff --git a/go/vt/vtgate/planbuilder/operators/hash_join_test.go b/go/vt/vtgate/planbuilder/operators/hash_join_test.go index 6abe43c05df..038c2d65da0 100644 --- a/go/vt/vtgate/planbuilder/operators/hash_join_test.go +++ b/go/vt/vtgate/planbuilder/operators/hash_join_test.go @@ -22,9 +22,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/test/vschemawrapper" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" + "vitess.io/vitess/go/vt/vtgate/vindexes" ) func TestJoinPredicates(t *testing.T) { @@ -59,7 +61,13 @@ func TestJoinPredicates(t *testing.T) { func TestOffsetPlanning(t *testing.T) { lcol1, lcol2 := sqlparser.NewColName("lhs1"), 
sqlparser.NewColName("lhs2") rcol1, rcol2 := sqlparser.NewColName("rhs1"), sqlparser.NewColName("rhs2") - ctx := &plancontext.PlanningContext{SemTable: semantics.EmptySemTable()} + ctx := &plancontext.PlanningContext{ + SemTable: semantics.EmptySemTable(), + VSchema: &vschemawrapper.VSchemaWrapper{ + V: &vindexes.VSchema{}, + SysVarEnabled: true, + }, + } lid := semantics.SingleTableSet(0) rid := semantics.SingleTableSet(1) ctx.SemTable.Recursive[lcol1] = lid diff --git a/go/vt/vtgate/planbuilder/operators/info_schema_planning.go b/go/vt/vtgate/planbuilder/operators/info_schema_planning.go index f7de09c4857..fd4f51d2ca4 100644 --- a/go/vt/vtgate/planbuilder/operators/info_schema_planning.go +++ b/go/vt/vtgate/planbuilder/operators/info_schema_planning.go @@ -41,12 +41,13 @@ type InfoSchemaRouting struct { Table *QueryTable } -func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext, rp *engine.RoutingParameters) { +func (isr *InfoSchemaRouting) UpdateRoutingParams(ctx *plancontext.PlanningContext, rp *engine.RoutingParameters) { rp.SysTableTableSchema = nil for _, expr := range isr.SysTableTableSchema { eexpr, err := evalengine.Translate(expr, &evalengine.Config{ Collation: collations.SystemCollation.Collation, ResolveColumn: NotImplementedSchemaInfoResolver, + CollationEnv: ctx.VSchema.CollationEnv(), }) if err != nil { panic(err) @@ -59,6 +60,7 @@ func (isr *InfoSchemaRouting) UpdateRoutingParams(_ *plancontext.PlanningContext eexpr, err := evalengine.Translate(expr, &evalengine.Config{ Collation: collations.SystemCollation.Collation, ResolveColumn: NotImplementedSchemaInfoResolver, + CollationEnv: ctx.VSchema.CollationEnv(), }) if err != nil { panic(err) @@ -132,6 +134,7 @@ func extractInfoSchemaRoutingPredicate(ctx *plancontext.PlanningContext, in sqlp _, err := evalengine.Translate(rhs, &evalengine.Config{ Collation: collations.SystemCollation.Collation, ResolveColumn: NotImplementedSchemaInfoResolver, + CollationEnv: 
ctx.VSchema.CollationEnv(), }) if err != nil { // if we can't translate this to an evalengine expression, diff --git a/go/vt/vtgate/planbuilder/operators/insert.go b/go/vt/vtgate/planbuilder/operators/insert.go index 194ef198772..a47214cb004 100644 --- a/go/vt/vtgate/planbuilder/operators/insert.go +++ b/go/vt/vtgate/planbuilder/operators/insert.go @@ -506,8 +506,9 @@ func insertRowsPlan(ctx *plancontext.PlanningContext, insOp *Insert, ins *sqlpar colNum, _ := findOrAddColumn(ins, col) for rowNum, row := range rows { innerpv, err := evalengine.Translate(row[colNum], &evalengine.Config{ - ResolveType: ctx.SemTable.TypeForExpr, - Collation: ctx.SemTable.Collation, + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + CollationEnv: ctx.VSchema.CollationEnv(), }) if err != nil { panic(err) @@ -636,8 +637,9 @@ func modifyForAutoinc(ctx *plancontext.PlanningContext, ins *sqlparser.Insert, v } var err error gen.Values, err = evalengine.Translate(autoIncValues, &evalengine.Config{ - ResolveType: ctx.SemTable.TypeForExpr, - Collation: ctx.SemTable.Collation, + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + CollationEnv: ctx.VSchema.CollationEnv(), }) if err != nil { panic(err) diff --git a/go/vt/vtgate/planbuilder/operators/join.go b/go/vt/vtgate/planbuilder/operators/join.go index 35bf26f9793..42ec1b75562 100644 --- a/go/vt/vtgate/planbuilder/operators/join.go +++ b/go/vt/vtgate/planbuilder/operators/join.go @@ -92,7 +92,7 @@ func createOuterJoin(tableExpr *sqlparser.JoinTableExpr, lhs, rhs Operator) Oper panic(vterrors.VT12001("subquery in outer join predicate")) } predicate := tableExpr.Condition.On - sqlparser.RemoveKeyspaceFromColName(predicate) + sqlparser.RemoveKeyspace(predicate) return &Join{LHS: lhs, RHS: rhs, LeftJoin: true, Predicate: predicate} } @@ -115,7 +115,7 @@ func createInnerJoin(ctx *plancontext.PlanningContext, tableExpr *sqlparser.Join sqc := &SubQueryBuilder{} outerID := TableID(op) 
joinPredicate := tableExpr.Condition.On - sqlparser.RemoveKeyspaceFromColName(joinPredicate) + sqlparser.RemoveKeyspace(joinPredicate) exprs := sqlparser.SplitAndExpression(nil, joinPredicate) for _, pred := range exprs { subq := sqc.handleSubquery(ctx, pred, outerID) diff --git a/go/vt/vtgate/planbuilder/operators/projection.go b/go/vt/vtgate/planbuilder/operators/projection.go index b416e369ca2..9523578abbc 100644 --- a/go/vt/vtgate/planbuilder/operators/projection.go +++ b/go/vt/vtgate/planbuilder/operators/projection.go @@ -554,8 +554,9 @@ func (p *Projection) planOffsets(ctx *plancontext.PlanningContext) Operator { // for everything else, we'll turn to the evalengine eexpr, err := evalengine.Translate(rewritten, &evalengine.Config{ - ResolveType: ctx.SemTable.TypeForExpr, - Collation: ctx.SemTable.Collation, + ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + CollationEnv: ctx.VSchema.CollationEnv(), }) if err != nil { panic(err) diff --git a/go/vt/vtgate/planbuilder/operators/query_planning.go b/go/vt/vtgate/planbuilder/operators/query_planning.go index 19f6f3bf27d..b2d51c2935e 100644 --- a/go/vt/vtgate/planbuilder/operators/query_planning.go +++ b/go/vt/vtgate/planbuilder/operators/query_planning.go @@ -21,6 +21,7 @@ import ( "io" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" ) @@ -96,6 +97,8 @@ func runRewriters(ctx *plancontext.PlanningContext, root Operator) Operator { return optimizeQueryGraph(ctx, in) case *LockAndComment: return pushLockAndComment(in) + case *Delete: + return tryPushDelete(in) default: return in, NoRewrite } @@ -104,6 +107,32 @@ func runRewriters(ctx *plancontext.PlanningContext, root Operator) Operator { return FixedPointBottomUp(root, TableID, visitor, stopAtRoute) } +func tryPushDelete(in *Delete) (Operator, *ApplyResult) { + switch src := in.Source.(type) { + case *Route: + if 
in.Limit != nil && !src.IsSingleShardOrByDestination() { + panic(vterrors.VT12001("multi shard DELETE with LIMIT")) + } + + switch r := src.Routing.(type) { + case *SequenceRouting: + // Sequences are just unsharded routes + src.Routing = &AnyShardRouting{ + keyspace: r.keyspace, + } + case *AnyShardRouting: + // References would have an unsharded source + // Alternates are not required. + r.Alternates = nil + } + return Swap(in, src, "pushed delete under route") + case *ApplyJoin: + panic(vterrors.VT12001("multi shard DELETE with join table references")) + } + + return in, nil +} + func pushLockAndComment(l *LockAndComment) (Operator, *ApplyResult) { switch src := l.Source.(type) { case *Horizon, *QueryGraph: diff --git a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go index 517b169bcf8..4495efeab3c 100644 --- a/go/vt/vtgate/planbuilder/operators/queryprojection_test.go +++ b/go/vt/vtgate/planbuilder/operators/queryprojection_test.go @@ -79,7 +79,7 @@ func TestQP(t *testing.T) { ctx := &plancontext.PlanningContext{SemTable: semantics.EmptySemTable()} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - stmt, err := sqlparser.Parse(tcase.sql) + stmt, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) sel := stmt.(*sqlparser.Select) @@ -193,7 +193,7 @@ func TestQPSimplifiedExpr(t *testing.T) { for _, tc := range testCases { t.Run(tc.query, func(t *testing.T) { - ast, err := sqlparser.Parse(tc.query) + ast, err := sqlparser.NewTestParser().Parse(tc.query) require.NoError(t, err) sel := ast.(*sqlparser.Select) _, err = semantics.Analyze(sel, "", &semantics.FakeSI{}) diff --git a/go/vt/vtgate/planbuilder/operators/route.go b/go/vt/vtgate/planbuilder/operators/route.go index d5eee19e5dd..952e455abb0 100644 --- a/go/vt/vtgate/planbuilder/operators/route.go +++ b/go/vt/vtgate/planbuilder/operators/route.go @@ -19,8 +19,11 @@ package operators import ( "fmt" + 
topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/slice" + "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" @@ -118,7 +121,7 @@ func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r } nr := &NoneRouting{keyspace: ks} - if isConstantFalse(expr) { + if isConstantFalse(expr, ctx.VSchema.ConnCollation(), ctx.VSchema.CollationEnv()) { return nr } @@ -162,9 +165,12 @@ func UpdateRoutingLogic(ctx *plancontext.PlanningContext, expr sqlparser.Expr, r // isConstantFalse checks whether this predicate can be evaluated at plan-time. If it returns `false` or `null`, // we know that the query will not return anything, and this can be used to produce better plans -func isConstantFalse(expr sqlparser.Expr) bool { - eenv := evalengine.EmptyExpressionEnv() - eexpr, err := evalengine.Translate(expr, nil) +func isConstantFalse(expr sqlparser.Expr, collation collations.ID, collationEnv *collations.Environment) bool { + eenv := evalengine.EmptyExpressionEnv(collationEnv) + eexpr, err := evalengine.Translate(expr, &evalengine.Config{ + Collation: collation, + CollationEnv: collationEnv, + }) if err != nil { return false } @@ -172,7 +178,7 @@ func isConstantFalse(expr sqlparser.Expr) bool { if err != nil { return false } - if eres.Value(collations.Default()).IsNull() { + if eres.Value(collation).IsNull() { return false } b, err := eres.ToBooleanStrict() @@ -275,6 +281,14 @@ func (r *Route) IsSingleShard() bool { return false } +func (r *Route) IsSingleShardOrByDestination() bool { + switch r.Routing.OpCode() { + case engine.Unsharded, engine.DBA, engine.Next, engine.EqualUnique, engine.Reference, engine.ByDestination: + return true + } + return false +} + func tupleAccess(expr sqlparser.Expr, coordinates []int) sqlparser.Expr { tuple, _ := expr.(sqlparser.ValTuple) for _, idx := range coordinates { @@ -372,23 
+386,55 @@ func findVSchemaTableAndCreateRoute( solves semantics.TableSet, planAlternates bool, ) *Route { - vschemaTable, _, _, _, target, err := ctx.VSchema.FindTableOrVindex(tableName) - if target != nil { - panic(vterrors.VT09017("SELECT with a target destination is not allowed")) - } + vschemaTable, _, _, tabletType, target, err := ctx.VSchema.FindTableOrVindex(tableName) if err != nil { panic(err) } + targeted := createTargetedRouting(ctx, target, tabletType, vschemaTable) + return createRouteFromVSchemaTable( ctx, queryTable, vschemaTable, solves, planAlternates, + targeted, ) } +func createTargetedRouting(ctx *plancontext.PlanningContext, target key.Destination, tabletType topodatapb.TabletType, vschemaTable *vindexes.Table) Routing { + switch ctx.Statement.(type) { + case *sqlparser.Update: + if tabletType != topodatapb.TabletType_PRIMARY { + panic(vterrors.VT09002("update")) + } + case *sqlparser.Delete: + if tabletType != topodatapb.TabletType_PRIMARY { + panic(vterrors.VT09002("delete")) + } + case *sqlparser.Insert: + if tabletType != topodatapb.TabletType_PRIMARY { + panic(vterrors.VT09002("insert")) + } + if target != nil { + panic(vterrors.VT09017("INSERT with a target destination is not allowed")) + } + case sqlparser.SelectStatement: + if target != nil { + panic(vterrors.VT09017("SELECT with a target destination is not allowed")) + } + } + + if target != nil { + return &TargetedRouting{ + keyspace: vschemaTable.Keyspace, + TargetDestination: target, + } + } + return nil +} + // createRouteFromTable creates a route from the given VSchema table. 
func createRouteFromVSchemaTable( ctx *plancontext.PlanningContext, @@ -396,6 +442,7 @@ func createRouteFromVSchemaTable( vschemaTable *vindexes.Table, solves semantics.TableSet, planAlternates bool, + targeted Routing, ) *Route { if vschemaTable.Name.String() != queryTable.Table.Name.String() { // we are dealing with a routed table @@ -420,8 +467,14 @@ func createRouteFromVSchemaTable( }, } - // We create the appropiate Routing struct here, depending on the type of table we are dealing with. - routing := createRoutingForVTable(vschemaTable, solves) + // We create the appropriate Routing struct here, depending on the type of table we are dealing with. + var routing Routing + if targeted != nil { + routing = targeted + } else { + routing = createRoutingForVTable(vschemaTable, solves) + } + for _, predicate := range queryTable.Predicates { routing = UpdateRoutingLogic(ctx, predicate, routing) } diff --git a/go/vt/vtgate/planbuilder/operators/route_planning.go b/go/vt/vtgate/planbuilder/operators/route_planning.go index cb33f4e1f55..07dbab3bc90 100644 --- a/go/vt/vtgate/planbuilder/operators/route_planning.go +++ b/go/vt/vtgate/planbuilder/operators/route_planning.go @@ -138,6 +138,7 @@ func generateOwnedVindexQuery(tblExpr sqlparser.TableExpr, del *sqlparser.Delete buf.Myprintf(", %v", column) } } + sqlparser.RemoveKeyspaceInTables(tblExpr) buf.Myprintf(" from %v%v%v%v for update", tblExpr, del.Where, del.OrderBy, del.Limit) return buf.String() } diff --git a/go/vt/vtgate/planbuilder/operators/sharded_routing.go b/go/vt/vtgate/planbuilder/operators/sharded_routing.go index 239ae9ce419..0bde10946fb 100644 --- a/go/vt/vtgate/planbuilder/operators/sharded_routing.go +++ b/go/vt/vtgate/planbuilder/operators/sharded_routing.go @@ -608,8 +608,9 @@ func tryMergeJoinShardedRouting( func makeEvalEngineExpr(ctx *plancontext.PlanningContext, n sqlparser.Expr) evalengine.Expr { for _, expr := range ctx.SemTable.GetExprAndEqualities(n) { ee, _ := evalengine.Translate(expr, 
&evalengine.Config{ - Collation: ctx.SemTable.Collation, - ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + ResolveType: ctx.SemTable.TypeForExpr, + CollationEnv: ctx.VSchema.CollationEnv(), }) if ee != nil { return ee diff --git a/go/vt/vtgate/planbuilder/operators/subquery_builder.go b/go/vt/vtgate/planbuilder/operators/subquery_builder.go index 6540ed10701..e582295ba91 100644 --- a/go/vt/vtgate/planbuilder/operators/subquery_builder.go +++ b/go/vt/vtgate/planbuilder/operators/subquery_builder.go @@ -201,7 +201,7 @@ func (sqb *SubQueryBuilder) inspectWhere( outerID: sqb.outerID, } for _, predicate := range sqlparser.SplitAndExpression(nil, in.Expr) { - sqlparser.RemoveKeyspaceFromColName(predicate) + sqlparser.RemoveKeyspace(predicate) subq := sqb.handleSubquery(ctx, predicate, sqb.totalID) if subq != nil { continue diff --git a/go/vt/vtgate/planbuilder/operators/table.go b/go/vt/vtgate/planbuilder/operators/table.go index 93b406232b2..bf03243bb81 100644 --- a/go/vt/vtgate/planbuilder/operators/table.go +++ b/go/vt/vtgate/planbuilder/operators/table.go @@ -115,7 +115,7 @@ func addColumn(ctx *plancontext.PlanningContext, op ColNameColumns, e sqlparser. 
if !ok { panic(vterrors.VT09018(fmt.Sprintf("cannot add '%s' expression to a table/vindex", sqlparser.String(e)))) } - sqlparser.RemoveKeyspaceFromColName(col) + sqlparser.RemoveKeyspace(col) cols := op.GetColNames() colAsExpr := func(c *sqlparser.ColName) sqlparser.Expr { return c } if offset, found := canReuseColumn(ctx, cols, e, colAsExpr); found { diff --git a/go/vt/vtgate/planbuilder/operators/update.go b/go/vt/vtgate/planbuilder/operators/update.go index f780e5405db..6c51418d054 100644 --- a/go/vt/vtgate/planbuilder/operators/update.go +++ b/go/vt/vtgate/planbuilder/operators/update.go @@ -255,7 +255,7 @@ func createFKCascadeOp(ctx *plancontext.PlanningContext, parentOp Operator, updS fkChildren = append(fkChildren, fkChild) } - selectionOp := createSelectionOp(ctx, selectExprs, updStmt.TableExprs, updStmt.Where, updStmt.OrderBy, nil, sqlparser.ForUpdateLockNoWait) + selectionOp := createSelectionOp(ctx, selectExprs, updStmt.TableExprs, updStmt.Where, updStmt.OrderBy, nil, getUpdateLock(updatedTable)) return &FkCascade{ Selection: selectionOp, @@ -658,7 +658,21 @@ func createFkVerifyOpForParentFKForUpdate(ctx *plancontext.PlanningContext, upda sqlparser.NewWhere(sqlparser.WhereClause, whereCond), nil, sqlparser.NewLimitWithoutOffset(1), - sqlparser.ForShareLockNoWait) + getVerifyLock(updatedTable)) +} + +func getVerifyLock(vTbl *vindexes.Table) sqlparser.Lock { + if len(vTbl.UniqueKeys) > 0 { + return sqlparser.ForShareLockNoWait + } + return sqlparser.ForShareLock +} + +func getUpdateLock(vTbl *vindexes.Table) sqlparser.Lock { + if len(vTbl.UniqueKeys) > 0 { + return sqlparser.ForUpdateLockNoWait + } + return sqlparser.ForUpdateLock } // Each child foreign key constraint is verified by a join query of the form: @@ -728,7 +742,7 @@ func createFkVerifyOpForChildFKForUpdate(ctx *plancontext.PlanningContext, updat sqlparser.NewWhere(sqlparser.WhereClause, whereCond), nil, sqlparser.NewLimitWithoutOffset(1), - sqlparser.ForShareLockNoWait) + 
getVerifyLock(updatedTable)) } // nullSafeNotInComparison is used to compare the child columns in the foreign key constraint aren't the same as the updateExpressions exactly. diff --git a/go/vt/vtgate/planbuilder/ordered_aggregate.go b/go/vt/vtgate/planbuilder/ordered_aggregate.go index 34646fa3dea..c6a37c8decb 100644 --- a/go/vt/vtgate/planbuilder/ordered_aggregate.go +++ b/go/vt/vtgate/planbuilder/ordered_aggregate.go @@ -17,6 +17,7 @@ limitations under the License. package planbuilder import ( + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/vtgate/engine" ) @@ -60,6 +61,8 @@ type orderedAggregate struct { groupByKeys []*engine.GroupByParams truncateColumnCount int + + collationEnv *collations.Environment } // Primitive implements the logicalPlan interface @@ -78,6 +81,7 @@ func (oa *orderedAggregate) Primitive() engine.Primitive { GroupByKeys: oa.groupByKeys, TruncateColumnCount: oa.truncateColumnCount, Input: input, + CollationEnv: oa.collationEnv, } } diff --git a/go/vt/vtgate/planbuilder/plan_test.go b/go/vt/vtgate/planbuilder/plan_test.go index 247777117b5..6ebd71dcf1b 100644 --- a/go/vt/vtgate/planbuilder/plan_test.go +++ b/go/vt/vtgate/planbuilder/plan_test.go @@ -530,7 +530,7 @@ func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSch if err != nil { t.Fatal(err) } - vschema := vindexes.BuildVSchema(formal) + vschema := vindexes.BuildVSchema(formal, sqlparser.NewTestParser()) if err != nil { t.Fatal(err) } @@ -541,9 +541,7 @@ func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSch // adding view in user keyspace if ks.Keyspace.Name == "user" { - if err = vschema.AddView(ks.Keyspace.Name, - "user_details_view", - "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id"); err != nil { + if err = vschema.AddView(ks.Keyspace.Name, "user_details_view", "select user.id, user_extra.col from user join user_extra on user.id = user_extra.user_id", 
sqlparser.NewTestParser()); err != nil { t.Fatal(err) } } @@ -566,7 +564,7 @@ func loadSchema(t testing.TB, filename string, setCollation bool) *vindexes.VSch // createFkDefinition is a helper function to create a Foreign key definition struct from the columns used in it provided as list of strings. func createFkDefinition(childCols []string, parentTableName string, parentCols []string, onUpdate, onDelete sqlparser.ReferenceAction) *sqlparser.ForeignKeyDefinition { - pKs, pTbl, _ := sqlparser.ParseTable(parentTableName) + pKs, pTbl, _ := sqlparser.NewTestParser().ParseTable(parentTableName) return &sqlparser.ForeignKeyDefinition{ Source: sqlparser.MakeColumns(childCols...), ReferenceDefinition: &sqlparser.ReferenceDefinition{ @@ -732,7 +730,7 @@ func exerciseAnalyzer(query, database string, s semantics.SchemaInformation) { recover() }() - ast, err := sqlparser.Parse(query) + ast, err := sqlparser.NewTestParser().Parse(query) if err != nil { return } diff --git a/go/vt/vtgate/planbuilder/plancontext/planning_context.go b/go/vt/vtgate/planbuilder/plancontext/planning_context.go index 68ccc95b9fd..3871c8fdbc4 100644 --- a/go/vt/vtgate/planbuilder/plancontext/planning_context.go +++ b/go/vt/vtgate/planbuilder/plancontext/planning_context.go @@ -49,11 +49,13 @@ type PlanningContext struct { // CurrentPhase keeps track of how far we've gone in the planning process // The type should be operators.Phase, but depending on that would lead to circular dependencies CurrentPhase int + + // Statement contains the originally parsed statement + Statement sqlparser.Statement } func CreatePlanningContext(stmt sqlparser.Statement, reservedVars *sqlparser.ReservedVars, - vschema VSchema, version querypb.ExecuteOptions_PlannerVersion, ) (*PlanningContext, error) { @@ -78,6 +80,7 @@ func CreatePlanningContext(stmt sqlparser.Statement, SkipPredicates: map[sqlparser.Expr]any{}, PlannerVersion: version, ReservedArguments: map[sqlparser.Expr]string{}, + Statement: stmt, }, nil } diff --git 
a/go/vt/vtgate/planbuilder/plancontext/vschema.go b/go/vt/vtgate/planbuilder/plancontext/vschema.go index 286e8d30b67..d6cb26c45eb 100644 --- a/go/vt/vtgate/planbuilder/plancontext/vschema.go +++ b/go/vt/vtgate/planbuilder/plancontext/vschema.go @@ -41,6 +41,7 @@ type VSchema interface { Planner() PlannerVersion SetPlannerVersion(pv PlannerVersion) ConnCollation() collations.ID + CollationEnv() *collations.Environment // ErrorIfShardedF will return an error if the keyspace is sharded, // and produce a warning if the vtgate if configured to do so @@ -91,6 +92,9 @@ type VSchema interface { // StorePrepareData stores the prepared data in the session. StorePrepareData(name string, v *vtgatepb.PrepareData) + + // SQLParser returns the proper sqlparser instance with the right version. + SQLParser() *sqlparser.Parser } // PlannerNameToVersion returns the numerical representation of the planner diff --git a/go/vt/vtgate/planbuilder/planner_test.go b/go/vt/vtgate/planbuilder/planner_test.go index 38c579502fe..2601615522f 100644 --- a/go/vt/vtgate/planbuilder/planner_test.go +++ b/go/vt/vtgate/planbuilder/planner_test.go @@ -58,7 +58,7 @@ func TestBindingSubquery(t *testing.T) { } for _, testcase := range testcases { t.Run(testcase.query, func(t *testing.T) { - parse, err := sqlparser.Parse(testcase.query) + parse, err := sqlparser.NewTestParser().Parse(testcase.query) require.NoError(t, err) selStmt := parse.(*sqlparser.Select) semTable, err := semantics.Analyze(selStmt, "d", &semantics.FakeSI{ diff --git a/go/vt/vtgate/planbuilder/predicate_rewrite_test.go b/go/vt/vtgate/planbuilder/predicate_rewrite_test.go index 369a99bf5d3..62931388d70 100644 --- a/go/vt/vtgate/planbuilder/predicate_rewrite_test.go +++ b/go/vt/vtgate/planbuilder/predicate_rewrite_test.go @@ -103,17 +103,19 @@ func TestFuzzRewriting(t *testing.T) { simplified := sqlparser.RewritePredicate(predicate) original, err := evalengine.Translate(predicate, &evalengine.Config{ - Collation: collations.Default(), + 
Collation: collations.MySQL8().DefaultConnectionCharset(), + CollationEnv: collations.MySQL8(), ResolveColumn: resolveForFuzz, }) require.NoError(t, err) simpler, err := evalengine.Translate(simplified.(sqlparser.Expr), &evalengine.Config{ - Collation: collations.Default(), + Collation: collations.MySQL8().DefaultConnectionCharset(), + CollationEnv: collations.MySQL8(), ResolveColumn: resolveForFuzz, }) require.NoError(t, err) - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(collations.MySQL8()) env.Row = make([]sqltypes.Value, tc.nodes) for i := range env.Row { env.Row[i] = sqltypes.NewInt32(1) @@ -139,7 +141,7 @@ func testValues(t *testing.T, env *evalengine.ExpressionEnv, i int, original, si require.NoError(t, err) v2, err := env.Evaluate(simpler) require.NoError(t, err) - assert.Equal(t, v1.Value(collations.Default()), v2.Value(collations.Default())) + assert.Equal(t, v1.Value(collations.MySQL8().DefaultConnectionCharset()), v2.Value(collations.MySQL8().DefaultConnectionCharset())) if len(env.Row) > i+1 { testValues(t, env, i+1, original, simpler) } diff --git a/go/vt/vtgate/planbuilder/rewrite_test.go b/go/vt/vtgate/planbuilder/rewrite_test.go index 292c94f448a..87c8985fd63 100644 --- a/go/vt/vtgate/planbuilder/rewrite_test.go +++ b/go/vt/vtgate/planbuilder/rewrite_test.go @@ -82,7 +82,7 @@ func TestHavingRewrite(t *testing.T) { } func prepTest(t *testing.T, sql string) (*semantics.SemTable, *sqlparser.ReservedVars, *sqlparser.Select) { - ast, vars, err := sqlparser.Parse2(sql) + ast, vars, err := sqlparser.NewTestParser().Parse2(sql) require.NoError(t, err) sel, isSelectStatement := ast.(*sqlparser.Select) diff --git a/go/vt/vtgate/planbuilder/route.go b/go/vt/vtgate/planbuilder/route.go index 63f6d0ea612..c03ab8c8801 100644 --- a/go/vt/vtgate/planbuilder/route.go +++ b/go/vt/vtgate/planbuilder/route.go @@ -73,7 +73,7 @@ func (rb *route) Wireup(ctx *plancontext.PlanningContext) error { } query, args := planableVindex.Query() - 
stmt, reserved, err := sqlparser.Parse2(query) + stmt, reserved, err := ctx.VSchema.SQLParser().Parse2(query) if err != nil { return err } diff --git a/go/vt/vtgate/planbuilder/select.go b/go/vt/vtgate/planbuilder/select.go index 77c883325c5..a94e3c1ae53 100644 --- a/go/vt/vtgate/planbuilder/select.go +++ b/go/vt/vtgate/planbuilder/select.go @@ -130,7 +130,7 @@ func buildSQLCalcFoundRowsPlan( return nil, nil, err } - statement2, reserved2, err := sqlparser.Parse2(originalQuery) + statement2, reserved2, err := vschema.SQLParser().Parse2(originalQuery) if err != nil { return nil, nil, err } @@ -289,7 +289,10 @@ func handleDualSelects(sel *sqlparser.Select, vschema plancontext.VSchema) (engi if isLFunc { elem := &engine.LockFunc{Typ: expr.Expr.(*sqlparser.LockingFunc)} if lFunc.Name != nil { - n, err := evalengine.Translate(lFunc.Name, nil) + n, err := evalengine.Translate(lFunc.Name, &evalengine.Config{ + Collation: vschema.ConnCollation(), + CollationEnv: vschema.CollationEnv(), + }) if err != nil { return nil, err } @@ -301,7 +304,10 @@ func handleDualSelects(sel *sqlparser.Select, vschema plancontext.VSchema) (engi if len(lockFunctions) > 0 { return nil, vterrors.VT12001(fmt.Sprintf("LOCK function and other expression: [%s] in same select query", sqlparser.String(expr))) } - exprs[i], err = evalengine.Translate(expr.Expr, &evalengine.Config{Collation: vschema.ConnCollation()}) + exprs[i], err = evalengine.Translate(expr.Expr, &evalengine.Config{ + Collation: vschema.ConnCollation(), + CollationEnv: vschema.CollationEnv(), + }) if err != nil { return nil, nil } diff --git a/go/vt/vtgate/planbuilder/set.go b/go/vt/vtgate/planbuilder/set.go index 43d85ee5113..33c0812a6cb 100644 --- a/go/vt/vtgate/planbuilder/set.go +++ b/go/vt/vtgate/planbuilder/set.go @@ -55,7 +55,10 @@ func buildSetPlan(stmt *sqlparser.Set, vschema plancontext.VSchema) (*planResult var setOps []engine.SetOp var err error - ec := new(expressionConverter) + ec := &expressionConverter{ + collationEnv: 
vschema.CollationEnv(), + collation: vschema.ConnCollation(), + } for _, expr := range stmt.Exprs { // AST struct has been prepared before getting here, so no scope here means that @@ -80,7 +83,7 @@ func buildSetPlan(stmt *sqlparser.Set, vschema plancontext.VSchema) (*planResult } setOps = append(setOps, setOp) case sqlparser.NextTxScope, sqlparser.SessionScope: - planFunc, err := sysvarPlanningFuncs.Get(expr) + planFunc, err := sysvarPlanningFuncs.Get(expr, vschema.CollationEnv(), vschema.SQLParser()) if err != nil { return nil, err } diff --git a/go/vt/vtgate/planbuilder/show.go b/go/vt/vtgate/planbuilder/show.go index 2a8b11fb70e..2955e1f0fca 100644 --- a/go/vt/vtgate/planbuilder/show.go +++ b/go/vt/vtgate/planbuilder/show.go @@ -561,10 +561,11 @@ func buildShowVGtidPlan(show *sqlparser.ShowBasic, vschema plancontext.VSchema) } return &engine.OrderedAggregate{ Aggregates: []*engine.AggregateParams{ - engine.NewAggregateParam(popcode.AggregateGtid, 1, "global vgtid_executed"), + engine.NewAggregateParam(popcode.AggregateGtid, 1, "global vgtid_executed", vschema.CollationEnv()), }, TruncateColumnCount: 2, Input: send, + CollationEnv: vschema.CollationEnv(), }, nil } diff --git a/go/vt/vtgate/planbuilder/show_test.go b/go/vt/vtgate/planbuilder/show_test.go index b36133bb1c7..f68622e7a27 100644 --- a/go/vt/vtgate/planbuilder/show_test.go +++ b/go/vt/vtgate/planbuilder/show_test.go @@ -50,7 +50,7 @@ func TestBuildDBPlan(t *testing.T) { for _, s := range testCases { t.Run(s.query, func(t *testing.T) { - parserOut, err := sqlparser.Parse(s.query) + parserOut, err := sqlparser.NewTestParser().Parse(s.query) require.NoError(t, err) show := parserOut.(*sqlparser.Show) @@ -76,7 +76,7 @@ func TestGenerateCharsetRows(t *testing.T) { append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - collations.Local().LookupName(collations.Default())), + collations.MySQL8().LookupName(collations.MySQL8().DefaultConnectionCharset())), sqltypes.NewUint32(4)), } rows2 := [][]sqltypes.Value{ 
@@ -88,7 +88,7 @@ func TestGenerateCharsetRows(t *testing.T) { append(buildVarCharRow( "utf8mb4", "UTF-8 Unicode", - collations.Local().LookupName(collations.Default())), + collations.MySQL8().LookupName(collations.MySQL8().DefaultConnectionCharset())), sqltypes.NewUint32(4)), } @@ -110,7 +110,7 @@ func TestGenerateCharsetRows(t *testing.T) { for _, tc := range testcases { t.Run(tc.input, func(t *testing.T) { - stmt, err := sqlparser.Parse(tc.input) + stmt, err := sqlparser.NewTestParser().Parse(tc.input) require.NoError(t, err) match := stmt.(*sqlparser.Show).Internal.(*sqlparser.ShowBasic) filter := match.Filter diff --git a/go/vt/vtgate/planbuilder/simplifier_test.go b/go/vt/vtgate/planbuilder/simplifier_test.go index 56d310d2949..e13fef7ae70 100644 --- a/go/vt/vtgate/planbuilder/simplifier_test.go +++ b/go/vt/vtgate/planbuilder/simplifier_test.go @@ -41,7 +41,7 @@ func TestSimplifyBuggyQuery(t *testing.T) { V: loadSchema(t, "vschemas/schema.json", true), Version: Gen4, } - stmt, reserved, err := sqlparser.Parse2(query) + stmt, reserved, err := sqlparser.NewTestParser().Parse2(query) require.NoError(t, err) rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil) reservedVars := sqlparser.NewReservedVars("vtg", reserved) @@ -63,7 +63,7 @@ func TestSimplifyPanic(t *testing.T) { V: loadSchema(t, "vschemas/schema.json", true), Version: Gen4, } - stmt, reserved, err := sqlparser.Parse2(query) + stmt, reserved, err := sqlparser.NewTestParser().Parse2(query) require.NoError(t, err) rewritten, _ := sqlparser.RewriteAST(sqlparser.CloneStatement(stmt), vschema.CurrentDb(), sqlparser.SQLSelectLimitUnset, "", nil, nil, nil) reservedVars := sqlparser.NewReservedVars("vtg", reserved) @@ -88,7 +88,7 @@ func TestUnsupportedFile(t *testing.T) { for _, tcase := range readJSONTests("unsupported_cases.txt") { t.Run(tcase.Query, func(t *testing.T) { log.Errorf("unsupported_cases.txt - %s", 
tcase.Query) - stmt, reserved, err := sqlparser.Parse2(tcase.Query) + stmt, reserved, err := sqlparser.NewTestParser().Parse2(tcase.Query) require.NoError(t, err) _, ok := stmt.(sqlparser.SelectStatement) if !ok { @@ -104,7 +104,7 @@ func TestUnsupportedFile(t *testing.T) { reservedVars := sqlparser.NewReservedVars("vtg", reserved) ast := rewritten.AST origQuery := sqlparser.String(ast) - stmt, _, _ = sqlparser.Parse2(tcase.Query) + stmt, _, _ = sqlparser.NewTestParser().Parse2(tcase.Query) simplified := simplifier.SimplifyStatement( stmt.(sqlparser.SelectStatement), vschema.CurrentDb(), @@ -125,7 +125,7 @@ func TestUnsupportedFile(t *testing.T) { } func keepSameError(query string, reservedVars *sqlparser.ReservedVars, vschema *vschemawrapper.VSchemaWrapper, needs *sqlparser.BindVarNeeds) func(statement sqlparser.SelectStatement) bool { - stmt, _, err := sqlparser.Parse2(query) + stmt, _, err := sqlparser.NewTestParser().Parse2(query) if err != nil { panic(err) } @@ -164,7 +164,7 @@ func keepPanicking(query string, reservedVars *sqlparser.ReservedVars, vschema * return false } - stmt, _, err := sqlparser.Parse2(query) + stmt, _, err := sqlparser.NewTestParser().Parse2(query) if err != nil { panic(err.Error()) } diff --git a/go/vt/vtgate/planbuilder/single_sharded_shortcut.go b/go/vt/vtgate/planbuilder/single_sharded_shortcut.go index daf19ced859..e3999c0703d 100644 --- a/go/vt/vtgate/planbuilder/single_sharded_shortcut.go +++ b/go/vt/vtgate/planbuilder/single_sharded_shortcut.go @@ -20,11 +20,10 @@ import ( "sort" "strings" - "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" - "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" + "vitess.io/vitess/go/vt/vtgate/planbuilder/operators" + "vitess.io/vitess/go/vt/vtgate/planbuilder/plancontext" "vitess.io/vitess/go/vt/vtgate/semantics" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -106,7 +105,7 @@ func getTableNames(semTable 
*semantics.SemTable) ([]sqlparser.TableName, error) func removeKeyspaceFromSelectExpr(expr sqlparser.SelectExpr) { switch expr := expr.(type) { case *sqlparser.AliasedExpr: - sqlparser.RemoveKeyspaceFromColName(expr.Expr) + sqlparser.RemoveKeyspace(expr.Expr) case *sqlparser.StarExpr: expr.TableName.Qualifier = sqlparser.NewIdentifierCS("") } diff --git a/go/vt/vtgate/planbuilder/system_variables.go b/go/vt/vtgate/planbuilder/system_variables.go index eccb263c65a..454445eeb32 100644 --- a/go/vt/vtgate/planbuilder/system_variables.go +++ b/go/vt/vtgate/planbuilder/system_variables.go @@ -20,6 +20,7 @@ import ( "fmt" "sync" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/sysvars" "vitess.io/vitess/go/vt/vterrors" @@ -27,8 +28,10 @@ import ( ) type sysvarPlanCache struct { - funcs map[string]planFunc - once sync.Once + funcs map[string]planFunc + once sync.Once + collationEnv *collations.Environment + parser *sqlparser.Parser } func (pc *sysvarPlanCache) initForSettings(systemVariables []sysvars.SystemVariable, f func(setting) planFunc) { @@ -53,21 +56,26 @@ func (pc *sysvarPlanCache) initForSettings(systemVariables []sysvars.SystemVaria } func (pc *sysvarPlanCache) parseAndBuildDefaultValue(sysvar sysvars.SystemVariable) evalengine.Expr { - stmt, err := sqlparser.Parse(fmt.Sprintf("select %s", sysvar.Default)) + stmt, err := pc.parser.Parse(fmt.Sprintf("select %s", sysvar.Default)) if err != nil { panic(fmt.Sprintf("bug in set plan init - default value for %s not parsable: %s", sysvar.Name, sysvar.Default)) } sel := stmt.(*sqlparser.Select) aliasedExpr := sel.SelectExprs[0].(*sqlparser.AliasedExpr) - def, err := evalengine.Translate(aliasedExpr.Expr, nil) + def, err := evalengine.Translate(aliasedExpr.Expr, &evalengine.Config{ + Collation: pc.collationEnv.DefaultConnectionCharset(), + CollationEnv: pc.collationEnv, + }) if err != nil { panic(fmt.Sprintf("bug in set plan init - default value for %s not able to 
convert to evalengine.Expr: %s", sysvar.Name, sysvar.Default)) } return def } -func (pc *sysvarPlanCache) init() { +func (pc *sysvarPlanCache) init(collationEnv *collations.Environment, parser *sqlparser.Parser) { pc.once.Do(func() { + pc.collationEnv = collationEnv + pc.parser = parser pc.funcs = make(map[string]planFunc) pc.initForSettings(sysvars.ReadOnly, buildSetOpReadOnly) pc.initForSettings(sysvars.IgnoreThese, buildSetOpIgnore) @@ -80,8 +88,8 @@ func (pc *sysvarPlanCache) init() { var sysvarPlanningFuncs sysvarPlanCache -func (pc *sysvarPlanCache) Get(expr *sqlparser.SetExpr) (planFunc, error) { - pc.init() +func (pc *sysvarPlanCache) Get(expr *sqlparser.SetExpr, collationEnv *collations.Environment, parser *sqlparser.Parser) (planFunc, error) { + pc.init(collationEnv, parser) pf, ok := pc.funcs[expr.Var.Name.Lowered()] if !ok { return nil, vterrors.VT05006(sqlparser.String(expr)) diff --git a/go/vt/vtgate/planbuilder/testdata/dml_cases.json b/go/vt/vtgate/planbuilder/testdata/dml_cases.json index eb257064afd..5ca6b034d24 100644 --- a/go/vt/vtgate/planbuilder/testdata/dml_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/dml_cases.json @@ -4926,6 +4926,126 @@ "user.music" ] } + }, + { + "comment": "delete from reference table - query send to source table", + "query": "delete from user.ref_with_source where col = 1", + "plan": { + "QueryType": "DELETE", + "Original": "delete from user.ref_with_source where col = 1", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Unsharded", + "Keyspace": { + "Name": "main", + "Sharded": false + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from source_of_ref where col = 1", + "Table": "source_of_ref" + }, + "TablesUsed": [ + "main.source_of_ref" + ] + } + }, + { + "comment": "delete from reference table - no source", + "query": "delete from user.ref", + "plan": { + "QueryType": "DELETE", + "Original": "delete from user.ref", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Reference", + 
"Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "Query": "delete from ref", + "Table": "ref" + }, + "TablesUsed": [ + "user.ref" + ] + } + }, + { + "comment": "delete by target destination with limit", + "query": "delete from `user[-]`.`user` limit 20", + "plan": { + "QueryType": "DELETE", + "Original": "delete from `user[-]`.`user` limit 20", + "Instructions": { + "OperatorType": "Delete", + "Variant": "ByDestination", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` limit 20 for update", + "Query": "delete from `user` limit 20", + "Table": "user" + }, + "TablesUsed": [ + "user.user" + ] + } + }, + { + "comment": "delete sharded table with join with reference table", + "query": "delete u from user u join ref_with_source r on u.col = r.col", + "plan": { + "QueryType": "DELETE", + "Original": "delete u from user u join ref_with_source r on u.col = r.col", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": "user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as u for update", + "Query": "delete u from `user` as u, ref_with_source as r where u.col = r.col", + "Table": "user" + }, + "TablesUsed": [ + "user.ref_with_source", + "user.user" + ] + } + }, + { + "comment": "delete sharded table with join with another sharded table on vindex column", + "query": "delete u from user u join music m on u.id = m.user_id", + "plan": { + "QueryType": "DELETE", + "Original": "delete u from user u join music m on u.id = m.user_id", + "Instructions": { + "OperatorType": "Delete", + "Variant": "Scatter", + "Keyspace": { + "Name": "user", + "Sharded": true + }, + "TargetTabletType": "PRIMARY", + "KsidLength": 1, + "KsidVindex": 
"user_index", + "OwnedVindexQuery": "select Id, `Name`, Costly from `user` as u for update", + "Query": "delete u from `user` as u, music as m where u.id = m.user_id", + "Table": "user" + }, + "TablesUsed": [ + "user.music", + "user.user" + ] + } } - ] diff --git a/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json b/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json index 3ab6ef96118..15ac4acf872 100644 --- a/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/foreignkey_cases.json @@ -82,7 +82,7 @@ "Sharded": true }, "FieldQuery": "select colb, cola, y, colc, x from multicol_tbl1 where 1 != 1", - "Query": "select colb, cola, y, colc, x from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3 for update nowait", + "Query": "select colb, cola, y, colc, x from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3 for update", "Table": "multicol_tbl1", "Values": [ "1", @@ -155,7 +155,7 @@ "Sharded": true }, "FieldQuery": "select col5, t5col5 from tbl5 where 1 != 1", - "Query": "select col5, t5col5 from tbl5 for update nowait", + "Query": "select col5, t5col5 from tbl5 for update", "Table": "tbl5" }, { @@ -312,7 +312,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where id = 1 for update nowait", + "Query": "select col2 from u_tbl2 where id = 1 for update", "Table": "u_tbl2" }, { @@ -423,7 +423,7 @@ "Sharded": true }, "FieldQuery": "select t5col5 from tbl5 where 1 != 1", - "Query": "select t5col5 from tbl5 for update nowait", + "Query": "select t5col5 from tbl5 for update", "Table": "tbl5" }, { @@ -527,7 +527,7 @@ "Sharded": true }, "FieldQuery": "select 1 from tbl10 where 1 != 1", - "Query": "select 1 from tbl10 where not (tbl10.col) <=> ('foo') for share nowait", + "Query": "select 1 from tbl10 where not (tbl10.col) <=> ('foo') for share", "Table": "tbl10" }, { @@ -538,7 +538,7 @@ "Sharded": true }, "FieldQuery": "select tbl3.col from tbl3 
where 1 != 1", - "Query": "select tbl3.col from tbl3 where tbl3.col = 'foo' for share nowait", + "Query": "select tbl3.col from tbl3 where tbl3.col = 'foo' for share", "Table": "tbl3" } ] @@ -592,7 +592,7 @@ "Sharded": true }, "FieldQuery": "select col9 from tbl9 where 1 != 1", - "Query": "select col9 from tbl9 where col9 = 34 for update nowait", + "Query": "select col9 from tbl9 where col9 = 34 for update", "Table": "tbl9", "Values": [ "34" @@ -657,7 +657,7 @@ "Sharded": false }, "FieldQuery": "select col1 from u_tbl1 where 1 != 1", - "Query": "select col1 from u_tbl1 for update nowait", + "Query": "select col1 from u_tbl1 for update", "Table": "u_tbl1" }, { @@ -677,7 +677,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update nowait", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -806,7 +806,7 @@ "Sharded": false }, "FieldQuery": "select 1 from u_tbl2 left join u_tbl1 on u_tbl1.col1 = cast(u_tbl2.col1 + 'bar' as CHAR) where 1 != 1", - "Query": "select 1 from u_tbl2 left join u_tbl1 on u_tbl1.col1 = cast(u_tbl2.col1 + 'bar' as CHAR) where cast(u_tbl2.col1 + 'bar' as CHAR) is not null and not (u_tbl2.col2) <=> (cast(u_tbl2.col1 + 'bar' as CHAR)) and u_tbl2.id = 1 and u_tbl1.col1 is null limit 1 for share nowait", + "Query": "select 1 from u_tbl2 left join u_tbl1 on u_tbl1.col1 = cast(u_tbl2.col1 + 'bar' as CHAR) where cast(u_tbl2.col1 + 'bar' as CHAR) is not null and not (u_tbl2.col2) <=> (cast(u_tbl2.col1 + 'bar' as CHAR)) and u_tbl2.id = 1 and u_tbl1.col1 is null limit 1 for share", "Table": "u_tbl1, u_tbl2" }, { @@ -822,7 +822,7 @@ "Sharded": false }, "FieldQuery": "select col2, col2 <=> cast(col1 + 'bar' as CHAR), cast(col1 + 'bar' as CHAR) from u_tbl2 where 1 != 1", - "Query": "select col2, col2 <=> cast(col1 + 'bar' as CHAR), cast(col1 + 'bar' as CHAR) from u_tbl2 where id = 1 for update nowait", + "Query": 
"select col2, col2 <=> cast(col1 + 'bar' as CHAR), cast(col1 + 'bar' as CHAR) from u_tbl2 where id = 1 for update", "Table": "u_tbl2" }, { @@ -889,7 +889,7 @@ "Sharded": false }, "FieldQuery": "select col1, col1 <=> cast(x + 'bar' as CHAR), cast(x + 'bar' as CHAR) from u_tbl1 where 1 != 1", - "Query": "select col1, col1 <=> cast(x + 'bar' as CHAR), cast(x + 'bar' as CHAR) from u_tbl1 where id = 1 for update nowait", + "Query": "select col1, col1 <=> cast(x + 'bar' as CHAR), cast(x + 'bar' as CHAR) from u_tbl1 where id = 1 for update", "Table": "u_tbl1" }, { @@ -916,7 +916,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update nowait", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -1047,7 +1047,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where id = 1 for update nowait", + "Query": "select col2 from u_tbl2 where id = 1 for update", "Table": "u_tbl2" }, { @@ -1104,7 +1104,7 @@ "Sharded": false }, "FieldQuery": "select col1 from u_tbl1 where 1 != 1", - "Query": "select col1 from u_tbl1 where id = 1 for update nowait", + "Query": "select col1 from u_tbl1 where id = 1 for update", "Table": "u_tbl1" }, { @@ -1124,7 +1124,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update nowait", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -1281,7 +1281,7 @@ "Sharded": true }, "FieldQuery": "select tbl3.colx from tbl3 where 1 != 1", - "Query": "select tbl3.colx from tbl3 where tbl3.colx + 10 is not null and not (tbl3.coly) <=> (tbl3.colx + 10) and tbl3.coly = 10 for share nowait", + "Query": "select tbl3.colx from tbl3 where tbl3.colx + 10 is not null and not (tbl3.coly) <=> (tbl3.colx + 10) and tbl3.coly = 10 
for share", "Table": "tbl3" }, { @@ -1292,7 +1292,7 @@ "Sharded": true }, "FieldQuery": "select tbl1.t1col1 from tbl1 where 1 != 1", - "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = :tbl3_colx + 10 for share nowait", + "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = :tbl3_colx + 10 for share", "Table": "tbl1" } ] @@ -1361,7 +1361,7 @@ "Sharded": true }, "FieldQuery": "select 1 from tbl3 where 1 != 1", - "Query": "select 1 from tbl3 where not (tbl3.coly) <=> (20) and tbl3.coly = 10 for share nowait", + "Query": "select 1 from tbl3 where not (tbl3.coly) <=> (20) and tbl3.coly = 10 for share", "Table": "tbl3" }, { @@ -1372,7 +1372,7 @@ "Sharded": true }, "FieldQuery": "select tbl1.t1col1 from tbl1 where 1 != 1", - "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = 20 for share nowait", + "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = 20 for share", "Table": "tbl1" } ] @@ -1421,7 +1421,7 @@ "Sharded": false }, "FieldQuery": "select col6 from u_tbl6 where 1 != 1", - "Query": "select col6 from u_tbl6 for update nowait", + "Query": "select col6 from u_tbl6 for update", "Table": "u_tbl6" }, { @@ -1497,7 +1497,7 @@ "Sharded": false }, "FieldQuery": "select col7 from u_tbl7 where 1 != 1", - "Query": "select col7 from u_tbl7 for update nowait", + "Query": "select col7 from u_tbl7 for update", "Table": "u_tbl7" }, { @@ -1517,7 +1517,7 @@ "Sharded": false }, "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast('foo' as CHAR) where 1 != 1", - "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast('foo' as CHAR) where not (u_tbl4.col4) <=> (cast('foo' as CHAR)) and (u_tbl4.col4) in ::fkc_vals and u_tbl3.col3 is null limit 1 for share nowait", + "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast('foo' as CHAR) where not (u_tbl4.col4) <=> (cast('foo' as CHAR)) and (u_tbl4.col4) in ::fkc_vals and u_tbl3.col3 is null limit 1 for share", "Table": "u_tbl3, u_tbl4" }, { @@ -1529,7 +1529,7 
@@ "Sharded": false }, "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", - "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (u_tbl9.col9) not in ((cast('foo' as CHAR))) and u_tbl4.col4 = u_tbl9.col9 limit 1 for share nowait", + "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (u_tbl9.col9) not in ((cast('foo' as CHAR))) and u_tbl4.col4 = u_tbl9.col9 limit 1 for share", "Table": "u_tbl4, u_tbl9" }, { @@ -1586,7 +1586,7 @@ "Sharded": false }, "FieldQuery": "select col7 from u_tbl7 where 1 != 1", - "Query": "select col7 from u_tbl7 for update nowait", + "Query": "select col7 from u_tbl7 for update", "Table": "u_tbl7" }, { @@ -1606,7 +1606,7 @@ "Sharded": false }, "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:v1 as CHAR) where 1 != 1", - "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:v1 as CHAR) where not (u_tbl4.col4) <=> (cast(:v1 as CHAR)) and (u_tbl4.col4) in ::fkc_vals and cast(:v1 as CHAR) is not null and u_tbl3.col3 is null limit 1 for share nowait", + "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:v1 as CHAR) where not (u_tbl4.col4) <=> (cast(:v1 as CHAR)) and (u_tbl4.col4) in ::fkc_vals and cast(:v1 as CHAR) is not null and u_tbl3.col3 is null limit 1 for share", "Table": "u_tbl3, u_tbl4" }, { @@ -1618,7 +1618,7 @@ "Sharded": false }, "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", - "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (cast(:v1 as CHAR) is null or (u_tbl9.col9) not in ((cast(:v1 as CHAR)))) and u_tbl4.col4 = u_tbl9.col9 limit 1 for share nowait", + "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (cast(:v1 as CHAR) is null or (u_tbl9.col9) not in ((cast(:v1 as CHAR)))) and u_tbl4.col4 = u_tbl9.col9 limit 1 for share", "Table": "u_tbl4, u_tbl9" }, { @@ -1693,7 +1693,7 @@ "Sharded": false }, "FieldQuery": "select col1 from u_tbl1 where 1 != 
1", - "Query": "select col1 from u_tbl1 where id = 1 for update nowait", + "Query": "select col1 from u_tbl1 where id = 1 for update", "Table": "u_tbl1" }, { @@ -1713,7 +1713,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update nowait", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -1869,7 +1869,7 @@ "Sharded": false }, "FieldQuery": "select col1 from u_tbl1 where 1 != 1", - "Query": "select col1 from u_tbl1 where (id) in ((1)) for update nowait", + "Query": "select col1 from u_tbl1 where (id) in ((1)) for update", "Table": "u_tbl1" }, { @@ -1889,7 +1889,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update nowait", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -1975,7 +1975,7 @@ "Sharded": false }, "FieldQuery": "select cola, colb from u_multicol_tbl1 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl1 where id = 3 for update nowait", + "Query": "select cola, colb from u_multicol_tbl1 where id = 3 for update", "Table": "u_multicol_tbl1" }, { @@ -1996,7 +1996,7 @@ "Sharded": false }, "FieldQuery": "select cola, colb from u_multicol_tbl2 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (cola, colb) not in ((1, 2)) for update nowait", + "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (cola, colb) not in ((1, 2)) for update", "Table": "u_multicol_tbl2" }, { @@ -2069,7 +2069,7 @@ "Sharded": false }, "FieldQuery": "select cola, colb from u_multicol_tbl1 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl1 where id = :v3 for update nowait", + "Query": "select cola, colb from u_multicol_tbl1 where id = :v3 for update", "Table": "u_multicol_tbl1" }, { 
@@ -2090,7 +2090,7 @@ "Sharded": false }, "FieldQuery": "select cola, colb from u_multicol_tbl2 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (:v2 is null or (:v1 is null or (cola, colb) not in ((:v1, :v2)))) for update nowait", + "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (:v2 is null or (:v1 is null or (cola, colb) not in ((:v1, :v2)))) for update", "Table": "u_multicol_tbl2" }, { @@ -2169,7 +2169,7 @@ "Sharded": true }, "FieldQuery": "select col5, t5col5 from tbl5 where 1 != 1", - "Query": "select col5, t5col5 from tbl5 where id = :v1 for update nowait", + "Query": "select col5, t5col5 from tbl5 where id = :v1 for update", "Table": "tbl5" }, { @@ -2249,7 +2249,7 @@ "Sharded": false }, "FieldQuery": "select col7, col7 <=> cast(baz + 1 + col7 as CHAR), cast(baz + 1 + col7 as CHAR) from u_tbl7 where 1 != 1", - "Query": "select col7, col7 <=> cast(baz + 1 + col7 as CHAR), cast(baz + 1 + col7 as CHAR) from u_tbl7 where bar = 42 for update nowait", + "Query": "select col7, col7 <=> cast(baz + 1 + col7 as CHAR), cast(baz + 1 + col7 as CHAR) from u_tbl7 where bar = 42 for update", "Table": "u_tbl7" }, { @@ -2276,7 +2276,7 @@ "Sharded": false }, "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:fkc_upd as CHAR) where 1 != 1", - "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:fkc_upd as CHAR) where not (u_tbl4.col4) <=> (cast(:fkc_upd as CHAR)) and (u_tbl4.col4) in ::fkc_vals and cast(:fkc_upd as CHAR) is not null and u_tbl3.col3 is null limit 1 for share nowait", + "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:fkc_upd as CHAR) where not (u_tbl4.col4) <=> (cast(:fkc_upd as CHAR)) and (u_tbl4.col4) in ::fkc_vals and cast(:fkc_upd as CHAR) is not null and u_tbl3.col3 is null limit 1 for share", "Table": "u_tbl3, u_tbl4" }, { @@ -2288,7 +2288,7 @@ "Sharded": false }, "FieldQuery": "select 1 from u_tbl4, 
u_tbl9 where 1 != 1", - "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (cast(:fkc_upd as CHAR) is null or (u_tbl9.col9) not in ((cast(:fkc_upd as CHAR)))) and u_tbl4.col4 = u_tbl9.col9 limit 1 for share nowait", + "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (cast(:fkc_upd as CHAR) is null or (u_tbl9.col9) not in ((cast(:fkc_upd as CHAR)))) and u_tbl4.col4 = u_tbl9.col9 limit 1 for share", "Table": "u_tbl4, u_tbl9" }, { @@ -2345,7 +2345,7 @@ "Sharded": false }, "FieldQuery": "select cola, colb, cola <=> cola + 3, cola + 3 from u_multicol_tbl1 where 1 != 1", - "Query": "select cola, colb, cola <=> cola + 3, cola + 3 from u_multicol_tbl1 where id = 3 for update nowait", + "Query": "select cola, colb, cola <=> cola + 3, cola + 3 from u_multicol_tbl1 where id = 3 for update", "Table": "u_multicol_tbl1" }, { @@ -2373,7 +2373,7 @@ "Sharded": false }, "FieldQuery": "select cola, colb from u_multicol_tbl2 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (:fkc_upd is null or (cola) not in ((:fkc_upd))) for update nowait", + "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (:fkc_upd is null or (cola) not in ((:fkc_upd))) for update", "Table": "u_multicol_tbl2" }, { @@ -2451,7 +2451,7 @@ "Sharded": false }, "FieldQuery": "select 1 from u_multicol_tbl2 left join u_multicol_tbl1 on u_multicol_tbl1.cola = 2 and u_multicol_tbl1.colb = u_multicol_tbl2.colc - 2 where 1 != 1", - "Query": "select 1 from u_multicol_tbl2 left join u_multicol_tbl1 on u_multicol_tbl1.cola = 2 and u_multicol_tbl1.colb = u_multicol_tbl2.colc - 2 where u_multicol_tbl2.colc - 2 is not null and not (u_multicol_tbl2.cola, u_multicol_tbl2.colb) <=> (2, u_multicol_tbl2.colc - 2) and u_multicol_tbl2.id = 7 and u_multicol_tbl1.cola is null and u_multicol_tbl1.colb is null limit 1 for share nowait", + "Query": "select 1 from u_multicol_tbl2 left join 
u_multicol_tbl1 on u_multicol_tbl1.cola = 2 and u_multicol_tbl1.colb = u_multicol_tbl2.colc - 2 where u_multicol_tbl2.colc - 2 is not null and not (u_multicol_tbl2.cola, u_multicol_tbl2.colb) <=> (2, u_multicol_tbl2.colc - 2) and u_multicol_tbl2.id = 7 and u_multicol_tbl1.cola is null and u_multicol_tbl1.colb is null limit 1 for share", "Table": "u_multicol_tbl1, u_multicol_tbl2" }, { @@ -2467,7 +2467,7 @@ "Sharded": false }, "FieldQuery": "select cola, colb, cola <=> 2, 2, colb <=> colc - 2, colc - 2 from u_multicol_tbl2 where 1 != 1", - "Query": "select cola, colb, cola <=> 2, 2, colb <=> colc - 2, colc - 2 from u_multicol_tbl2 where id = 7 for update nowait", + "Query": "select cola, colb, cola <=> 2, 2, colb <=> colc - 2, colc - 2 from u_multicol_tbl2 where id = 7 for update", "Table": "u_multicol_tbl2" }, { @@ -2707,7 +2707,7 @@ "Sharded": false }, "FieldQuery": "select col1 from u_tbl1 where 1 != 1", - "Query": "select col1 from u_tbl1 where id = 1 for update nowait", + "Query": "select col1 from u_tbl1 where id = 1 for update", "Table": "u_tbl1" }, { @@ -2727,7 +2727,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update nowait", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -2871,7 +2871,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where id = :v1 for update nowait", + "Query": "select col2 from u_tbl2 where id = :v1 for update", "Table": "u_tbl2" }, { @@ -2930,7 +2930,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where id = :v3 for update nowait", + "Query": "select col2 from u_tbl2 where id = :v3 for update", "Table": "u_tbl2" }, { @@ -2989,7 +2989,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where 
id = :v5 for update nowait", + "Query": "select col2 from u_tbl2 where id = :v5 for update", "Table": "u_tbl2" }, { diff --git a/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_off_cases.json b/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_off_cases.json index 9fd0563f703..baa82c89dfd 100644 --- a/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_off_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_off_cases.json @@ -111,7 +111,7 @@ "Sharded": true }, "FieldQuery": "select colb, cola, y, colc, x from multicol_tbl1 where 1 != 1", - "Query": "select colb, cola, y, colc, x from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3 for update nowait", + "Query": "select colb, cola, y, colc, x from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3 for update", "Table": "multicol_tbl1", "Values": [ "1", @@ -258,7 +258,7 @@ "Sharded": true }, "FieldQuery": "select t5col5 from tbl5 where 1 != 1", - "Query": "select t5col5 from tbl5 for update nowait", + "Query": "select t5col5 from tbl5 for update", "Table": "tbl5" }, { @@ -366,7 +366,7 @@ "Sharded": true }, "FieldQuery": "select col9 from tbl9 where 1 != 1", - "Query": "select col9 from tbl9 where col9 = 34 for update nowait", + "Query": "select col9 from tbl9 where col9 = 34 for update", "Table": "tbl9", "Values": [ "34" diff --git a/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_on_cases.json b/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_on_cases.json index e382a83a0ad..24775f5076f 100644 --- a/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_on_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/foreignkey_checks_on_cases.json @@ -82,7 +82,7 @@ "Sharded": true }, "FieldQuery": "select colb, cola, y, colc, x from multicol_tbl1 where 1 != 1", - "Query": "select colb, cola, y, colc, x from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3 for update nowait", + "Query": "select colb, cola, y, colc, x from multicol_tbl1 where cola = 1 and colb = 2 and colc = 3 for 
update", "Table": "multicol_tbl1", "Values": [ "1", @@ -155,7 +155,7 @@ "Sharded": true }, "FieldQuery": "select col5, t5col5 from tbl5 where 1 != 1", - "Query": "select col5, t5col5 from tbl5 for update nowait", + "Query": "select col5, t5col5 from tbl5 for update", "Table": "tbl5" }, { @@ -312,7 +312,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where id = 1 for update nowait", + "Query": "select col2 from u_tbl2 where id = 1 for update", "Table": "u_tbl2" }, { @@ -423,7 +423,7 @@ "Sharded": true }, "FieldQuery": "select t5col5 from tbl5 where 1 != 1", - "Query": "select t5col5 from tbl5 for update nowait", + "Query": "select t5col5 from tbl5 for update", "Table": "tbl5" }, { @@ -527,7 +527,7 @@ "Sharded": true }, "FieldQuery": "select 1 from tbl10 where 1 != 1", - "Query": "select 1 from tbl10 where not (tbl10.col) <=> ('foo') for share nowait", + "Query": "select 1 from tbl10 where not (tbl10.col) <=> ('foo') for share", "Table": "tbl10" }, { @@ -538,7 +538,7 @@ "Sharded": true }, "FieldQuery": "select tbl3.col from tbl3 where 1 != 1", - "Query": "select tbl3.col from tbl3 where tbl3.col = 'foo' for share nowait", + "Query": "select tbl3.col from tbl3 where tbl3.col = 'foo' for share", "Table": "tbl3" } ] @@ -592,7 +592,7 @@ "Sharded": true }, "FieldQuery": "select col9 from tbl9 where 1 != 1", - "Query": "select col9 from tbl9 where col9 = 34 for update nowait", + "Query": "select col9 from tbl9 where col9 = 34 for update", "Table": "tbl9", "Values": [ "34" @@ -657,7 +657,7 @@ "Sharded": false }, "FieldQuery": "select col1 from u_tbl1 where 1 != 1", - "Query": "select col1 from u_tbl1 for update nowait", + "Query": "select col1 from u_tbl1 for update", "Table": "u_tbl1" }, { @@ -677,7 +677,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update nowait", + "Query": "select col2 from u_tbl2 where 
(col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -806,7 +806,7 @@ "Sharded": false }, "FieldQuery": "select 1 from u_tbl2 left join u_tbl1 on u_tbl1.col1 = cast(u_tbl2.col1 + 'bar' as CHAR) where 1 != 1", - "Query": "select 1 from u_tbl2 left join u_tbl1 on u_tbl1.col1 = cast(u_tbl2.col1 + 'bar' as CHAR) where cast(u_tbl2.col1 + 'bar' as CHAR) is not null and not (u_tbl2.col2) <=> (cast(u_tbl2.col1 + 'bar' as CHAR)) and u_tbl2.id = 1 and u_tbl1.col1 is null limit 1 for share nowait", + "Query": "select 1 from u_tbl2 left join u_tbl1 on u_tbl1.col1 = cast(u_tbl2.col1 + 'bar' as CHAR) where cast(u_tbl2.col1 + 'bar' as CHAR) is not null and not (u_tbl2.col2) <=> (cast(u_tbl2.col1 + 'bar' as CHAR)) and u_tbl2.id = 1 and u_tbl1.col1 is null limit 1 for share", "Table": "u_tbl1, u_tbl2" }, { @@ -822,7 +822,7 @@ "Sharded": false }, "FieldQuery": "select col2, col2 <=> cast(col1 + 'bar' as CHAR), cast(col1 + 'bar' as CHAR) from u_tbl2 where 1 != 1", - "Query": "select col2, col2 <=> cast(col1 + 'bar' as CHAR), cast(col1 + 'bar' as CHAR) from u_tbl2 where id = 1 for update nowait", + "Query": "select col2, col2 <=> cast(col1 + 'bar' as CHAR), cast(col1 + 'bar' as CHAR) from u_tbl2 where id = 1 for update", "Table": "u_tbl2" }, { @@ -889,7 +889,7 @@ "Sharded": false }, "FieldQuery": "select col1, col1 <=> cast(x + 'bar' as CHAR), cast(x + 'bar' as CHAR) from u_tbl1 where 1 != 1", - "Query": "select col1, col1 <=> cast(x + 'bar' as CHAR), cast(x + 'bar' as CHAR) from u_tbl1 where id = 1 for update nowait", + "Query": "select col1, col1 <=> cast(x + 'bar' as CHAR), cast(x + 'bar' as CHAR) from u_tbl1 where id = 1 for update", "Table": "u_tbl1" }, { @@ -916,7 +916,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update nowait", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -1047,7 +1047,7 @@ "Sharded": false }, 
"FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where id = 1 for update nowait", + "Query": "select col2 from u_tbl2 where id = 1 for update", "Table": "u_tbl2" }, { @@ -1104,7 +1104,7 @@ "Sharded": false }, "FieldQuery": "select col1 from u_tbl1 where 1 != 1", - "Query": "select col1 from u_tbl1 where id = 1 for update nowait", + "Query": "select col1 from u_tbl1 where id = 1 for update", "Table": "u_tbl1" }, { @@ -1124,7 +1124,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update nowait", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -1281,7 +1281,7 @@ "Sharded": true }, "FieldQuery": "select tbl3.colx from tbl3 where 1 != 1", - "Query": "select tbl3.colx from tbl3 where tbl3.colx + 10 is not null and not (tbl3.coly) <=> (tbl3.colx + 10) and tbl3.coly = 10 for share nowait", + "Query": "select tbl3.colx from tbl3 where tbl3.colx + 10 is not null and not (tbl3.coly) <=> (tbl3.colx + 10) and tbl3.coly = 10 for share", "Table": "tbl3" }, { @@ -1292,7 +1292,7 @@ "Sharded": true }, "FieldQuery": "select tbl1.t1col1 from tbl1 where 1 != 1", - "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = :tbl3_colx + 10 for share nowait", + "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = :tbl3_colx + 10 for share", "Table": "tbl1" } ] @@ -1361,7 +1361,7 @@ "Sharded": true }, "FieldQuery": "select 1 from tbl3 where 1 != 1", - "Query": "select 1 from tbl3 where not (tbl3.coly) <=> (20) and tbl3.coly = 10 for share nowait", + "Query": "select 1 from tbl3 where not (tbl3.coly) <=> (20) and tbl3.coly = 10 for share", "Table": "tbl3" }, { @@ -1372,7 +1372,7 @@ "Sharded": true }, "FieldQuery": "select tbl1.t1col1 from tbl1 where 1 != 1", - "Query": "select tbl1.t1col1 from tbl1 where tbl1.t1col1 = 20 for share nowait", + "Query": "select tbl1.t1col1 from tbl1 where 
tbl1.t1col1 = 20 for share", "Table": "tbl1" } ] @@ -1421,7 +1421,7 @@ "Sharded": false }, "FieldQuery": "select col6 from u_tbl6 where 1 != 1", - "Query": "select col6 from u_tbl6 for update nowait", + "Query": "select col6 from u_tbl6 for update", "Table": "u_tbl6" }, { @@ -1497,7 +1497,7 @@ "Sharded": false }, "FieldQuery": "select col7 from u_tbl7 where 1 != 1", - "Query": "select col7 from u_tbl7 for update nowait", + "Query": "select col7 from u_tbl7 for update", "Table": "u_tbl7" }, { @@ -1517,7 +1517,7 @@ "Sharded": false }, "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast('foo' as CHAR) where 1 != 1", - "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast('foo' as CHAR) where not (u_tbl4.col4) <=> (cast('foo' as CHAR)) and (u_tbl4.col4) in ::fkc_vals and u_tbl3.col3 is null limit 1 for share nowait", + "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast('foo' as CHAR) where not (u_tbl4.col4) <=> (cast('foo' as CHAR)) and (u_tbl4.col4) in ::fkc_vals and u_tbl3.col3 is null limit 1 for share", "Table": "u_tbl3, u_tbl4" }, { @@ -1529,7 +1529,7 @@ "Sharded": false }, "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", - "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (u_tbl9.col9) not in ((cast('foo' as CHAR))) and u_tbl4.col4 = u_tbl9.col9 limit 1 for share nowait", + "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (u_tbl9.col9) not in ((cast('foo' as CHAR))) and u_tbl4.col4 = u_tbl9.col9 limit 1 for share", "Table": "u_tbl4, u_tbl9" }, { @@ -1586,7 +1586,7 @@ "Sharded": false }, "FieldQuery": "select col7 from u_tbl7 where 1 != 1", - "Query": "select col7 from u_tbl7 for update nowait", + "Query": "select col7 from u_tbl7 for update", "Table": "u_tbl7" }, { @@ -1606,7 +1606,7 @@ "Sharded": false }, "FieldQuery": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:v1 as CHAR) where 1 != 1", - "Query": "select 1 from u_tbl4 
left join u_tbl3 on u_tbl3.col3 = cast(:v1 as CHAR) where not (u_tbl4.col4) <=> (cast(:v1 as CHAR)) and (u_tbl4.col4) in ::fkc_vals and cast(:v1 as CHAR) is not null and u_tbl3.col3 is null limit 1 for share nowait", + "Query": "select 1 from u_tbl4 left join u_tbl3 on u_tbl3.col3 = cast(:v1 as CHAR) where not (u_tbl4.col4) <=> (cast(:v1 as CHAR)) and (u_tbl4.col4) in ::fkc_vals and cast(:v1 as CHAR) is not null and u_tbl3.col3 is null limit 1 for share", "Table": "u_tbl3, u_tbl4" }, { @@ -1618,7 +1618,7 @@ "Sharded": false }, "FieldQuery": "select 1 from u_tbl4, u_tbl9 where 1 != 1", - "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (cast(:v1 as CHAR) is null or (u_tbl9.col9) not in ((cast(:v1 as CHAR)))) and u_tbl4.col4 = u_tbl9.col9 limit 1 for share nowait", + "Query": "select 1 from u_tbl4, u_tbl9 where (u_tbl4.col4) in ::fkc_vals and (cast(:v1 as CHAR) is null or (u_tbl9.col9) not in ((cast(:v1 as CHAR)))) and u_tbl4.col4 = u_tbl9.col9 limit 1 for share", "Table": "u_tbl4, u_tbl9" }, { @@ -1693,7 +1693,7 @@ "Sharded": false }, "FieldQuery": "select col1 from u_tbl1 where 1 != 1", - "Query": "select col1 from u_tbl1 where id = 1 for update nowait", + "Query": "select col1 from u_tbl1 where id = 1 for update", "Table": "u_tbl1" }, { @@ -1713,7 +1713,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update nowait", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -1869,7 +1869,7 @@ "Sharded": false }, "FieldQuery": "select col1 from u_tbl1 where 1 != 1", - "Query": "select col1 from u_tbl1 where (id) in ((1)) for update nowait", + "Query": "select col1 from u_tbl1 where (id) in ((1)) for update", "Table": "u_tbl1" }, { @@ -1889,7 +1889,7 @@ "Sharded": false }, "FieldQuery": "select col2 from u_tbl2 where 1 != 1", - "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update 
nowait", + "Query": "select col2 from u_tbl2 where (col2) in ::fkc_vals for update", "Table": "u_tbl2" }, { @@ -1975,7 +1975,7 @@ "Sharded": false }, "FieldQuery": "select cola, colb from u_multicol_tbl1 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl1 where id = 3 for update nowait", + "Query": "select cola, colb from u_multicol_tbl1 where id = 3 for update", "Table": "u_multicol_tbl1" }, { @@ -1996,7 +1996,7 @@ "Sharded": false }, "FieldQuery": "select cola, colb from u_multicol_tbl2 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (cola, colb) not in ((1, 2)) for update nowait", + "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (cola, colb) not in ((1, 2)) for update", "Table": "u_multicol_tbl2" }, { @@ -2069,7 +2069,7 @@ "Sharded": false }, "FieldQuery": "select cola, colb from u_multicol_tbl1 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl1 where id = :v3 for update nowait", + "Query": "select cola, colb from u_multicol_tbl1 where id = :v3 for update", "Table": "u_multicol_tbl1" }, { @@ -2090,7 +2090,7 @@ "Sharded": false }, "FieldQuery": "select cola, colb from u_multicol_tbl2 where 1 != 1", - "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (:v2 is null or (:v1 is null or (cola, colb) not in ((:v1, :v2)))) for update nowait", + "Query": "select cola, colb from u_multicol_tbl2 where (cola, colb) in ::fkc_vals and (:v2 is null or (:v1 is null or (cola, colb) not in ((:v1, :v2)))) for update", "Table": "u_multicol_tbl2" }, { @@ -2169,7 +2169,7 @@ "Sharded": true }, "FieldQuery": "select col5, t5col5 from tbl5 where 1 != 1", - "Query": "select col5, t5col5 from tbl5 where id = :v1 for update nowait", + "Query": "select col5, t5col5 from tbl5 where id = :v1 for update", "Table": "tbl5" }, { diff --git a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json 
b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json index 8c4b7c89e44..b1c1c45001c 100644 --- a/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json +++ b/go/vt/vtgate/planbuilder/testdata/unsupported_cases.json @@ -54,11 +54,6 @@ "query": "update user_extra set val = 1 where (name = 'foo' or id = 1) limit 1", "plan": "VT12001: unsupported: multi shard UPDATE with LIMIT" }, - { - "comment": "multi delete multi table", - "query": "delete user from user join user_extra on user.id = user_extra.id where user.name = 'foo'", - "plan": "VT12001: unsupported: multi-shard or vindex write statement" - }, { "comment": "update changes primary vindex column", "query": "update user set id = 1 where id = 1", @@ -162,7 +157,7 @@ { "comment": "delete with multi-table targets", "query": "delete music,user from music inner join user where music.id = user.id", - "plan": "VT12001: unsupported: multi-shard or vindex write statement" + "plan": "VT12001: unsupported: multi-table DELETE statement in a sharded keyspace" }, { "comment": "select get_lock with non-dual table", @@ -388,5 +383,35 @@ "comment": "We need schema tracking to allow unexpanded columns inside UNION", "query": "select x from (select t.*, 0 as x from user t union select t.*, 1 as x from user_extra t) AS t", "plan": "VT09015: schema tracking required" + }, + { + "comment": "multi table delete with 2 sharded tables join on vindex column", + "query": "delete u, m from user u join music m on u.id = m.user_id", + "plan": "VT12001: unsupported: multi-table DELETE statement in a sharded keyspace" + }, + { + "comment": "multi table delete with 2 sharded tables join on non-vindex column", + "query": "delete u, m from user u join music m on u.col = m.col", + "plan": "VT12001: unsupported: multi-table DELETE statement in a sharded keyspace" + }, + { + "comment": "multi table delete with 1 sharded and 1 reference table", + "query": "delete u, r from user u join ref_with_source r on u.col = r.col", + "plan": "VT12001: 
unsupported: multi-table DELETE statement in a sharded keyspace" + }, + { + "comment": "multi delete multi table", + "query": "delete user from user join user_extra on user.id = user_extra.id where user.name = 'foo'", + "plan": "VT12001: unsupported: multi shard DELETE with join table references" + }, + { + "comment": "multi delete multi table with alias", + "query": "delete u from user u join music m on u.col = m.col", + "plan": "VT12001: unsupported: multi shard DELETE with join table references" + }, + { + "comment": "reference table delete with join", + "query": "delete r from user u join ref_with_source r on u.col = r.col", + "plan": "VT12001: unsupported: DELETE on reference table with join" } ] diff --git a/go/vt/vtgate/planbuilder/vindex_op.go b/go/vt/vtgate/planbuilder/vindex_op.go index c439dec1701..b06606070d7 100644 --- a/go/vt/vtgate/planbuilder/vindex_op.go +++ b/go/vt/vtgate/planbuilder/vindex_op.go @@ -33,8 +33,9 @@ func transformVindexPlan(ctx *plancontext.PlanningContext, op *operators.Vindex) } expr, err := evalengine.Translate(op.Value, &evalengine.Config{ - Collation: ctx.SemTable.Collation, - ResolveType: ctx.SemTable.TypeForExpr, + Collation: ctx.SemTable.Collation, + ResolveType: ctx.SemTable.TypeForExpr, + CollationEnv: ctx.VSchema.CollationEnv(), }) if err != nil { return nil, err diff --git a/go/vt/vtgate/plugin_mysql_server.go b/go/vt/vtgate/plugin_mysql_server.go index c3a67b1d7e1..0508b7029ba 100644 --- a/go/vt/vtgate/plugin_mysql_server.go +++ b/go/vt/vtgate/plugin_mysql_server.go @@ -420,6 +420,10 @@ func (vh *vtgateHandler) KillQuery(connectionID uint32) error { return nil } +func (vh *vtgateHandler) SQLParser() *sqlparser.Parser { + return vh.vtg.executor.parser +} + func (vh *vtgateHandler) session(c *mysql.Conn) *vtgatepb.Session { session, _ := c.ClientData.(*vtgatepb.Session) if session == nil { @@ -531,6 +535,7 @@ func initMySQLProtocol(vtgate *VTGate) *mysqlServer { mysqlKeepAlivePeriod, mysqlServerFlushDelay, 
servenv.MySQLServerVersion(), + servenv.TruncateErrLen, ) if err != nil { log.Exitf("mysql.NewListener failed: %v", err) @@ -577,6 +582,7 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys mysqlKeepAlivePeriod, mysqlServerFlushDelay, servenv.MySQLServerVersion(), + servenv.TruncateErrLen, ) switch err := err.(type) { @@ -611,6 +617,7 @@ func newMysqlUnixSocket(address string, authServer mysql.AuthServer, handler mys mysqlKeepAlivePeriod, mysqlServerFlushDelay, servenv.MySQLServerVersion(), + servenv.TruncateErrLen, ) return listener, listenerErr default: diff --git a/go/vt/vtgate/plugin_mysql_server_test.go b/go/vt/vtgate/plugin_mysql_server_test.go index 21375050a4d..89786766e2f 100644 --- a/go/vt/vtgate/plugin_mysql_server_test.go +++ b/go/vt/vtgate/plugin_mysql_server_test.go @@ -30,20 +30,20 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/test/utils" - - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/trace" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tlstest" ) type testHandler struct { mysql.UnimplementedHandler lastConn *mysql.Conn + parser *sqlparser.Parser } func (th *testHandler) NewConnection(c *mysql.Conn) { @@ -83,6 +83,10 @@ func (th *testHandler) WarningCount(c *mysql.Conn) uint16 { return 0 } +func (th *testHandler) SQLParser() *sqlparser.Parser { + return th.parser +} + func TestConnectionUnixSocket(t *testing.T) { th := &testHandler{} @@ -348,7 +352,7 @@ func TestGracefulShutdown(t *testing.T) { vh := newVtgateHandler(&VTGate{executor: executor, timings: timings, rowsReturned: rowsReturned, rowsAffected: rowsAffected}) th := &testHandler{} - listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, 
false, 0, 0, "8.0.30-Vitess") + listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, false, 0, 0, "8.0.30-Vitess", 0) require.NoError(t, err) defer listener.Close() @@ -378,7 +382,7 @@ func TestGracefulShutdownWithTransaction(t *testing.T) { vh := newVtgateHandler(&VTGate{executor: executor, timings: timings, rowsReturned: rowsReturned, rowsAffected: rowsAffected}) th := &testHandler{} - listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, false, 0, 0, "8.0.30-Vitess") + listener, err := mysql.NewListener("tcp", "127.0.0.1:", mysql.NewAuthServerNone(), th, 0, 0, false, false, 0, 0, "8.0.30-Vitess", 0) require.NoError(t, err) defer listener.Close() diff --git a/go/vt/vtgate/querylogz.go b/go/vt/vtgate/querylogz.go index acfb970df5a..0e8c8044515 100644 --- a/go/vt/vtgate/querylogz.go +++ b/go/vt/vtgate/querylogz.go @@ -57,7 +57,7 @@ var ( querylogzFuncMap = template.FuncMap{ "stampMicro": func(t time.Time) string { return t.Format(time.StampMicro) }, "cssWrappable": logz.Wrappable, - "truncateQuery": sqlparser.TruncateForUI, + "truncateQuery": sqlparser.NewTestParser().TruncateForUI, "unquote": func(s string) string { return strings.Trim(s, "\"") }, } querylogzTmpl = template.Must(template.New("example").Funcs(querylogzFuncMap).Parse(` diff --git a/go/vt/vtgate/queryz.go b/go/vt/vtgate/queryz.go index e546fc68c6f..93bf347eeff 100644 --- a/go/vt/vtgate/queryz.go +++ b/go/vt/vtgate/queryz.go @@ -27,7 +27,6 @@ import ( "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logz" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/engine" ) @@ -145,7 +144,7 @@ func queryzHandler(e *Executor, w http.ResponseWriter, r *http.Request) { e.ForEachPlan(func(plan *engine.Plan) bool { Value := &queryzRow{ - Query: logz.Wrappable(sqlparser.TruncateForUI(plan.Original)), + Query: logz.Wrappable(e.parser.TruncateForUI(plan.Original)), } Value.Count, 
Value.tm, Value.ShardQueries, Value.RowsAffected, Value.RowsReturned, Value.Errors = plan.Stats() var timepq time.Duration diff --git a/go/vt/vtgate/safe_session.go b/go/vt/vtgate/safe_session.go index 2adb5b665a5..45fff46f629 100644 --- a/go/vt/vtgate/safe_session.go +++ b/go/vt/vtgate/safe_session.go @@ -73,6 +73,7 @@ type ( mu sync.Mutex entries []engine.ExecuteEntry lastID int + parser *sqlparser.Parser } // autocommitState keeps track of whether a single round-trip @@ -941,11 +942,13 @@ func (session *SafeSession) ClearAdvisoryLock() { session.AdvisoryLock = nil } -func (session *SafeSession) EnableLogging() { +func (session *SafeSession) EnableLogging(parser *sqlparser.Parser) { session.mu.Lock() defer session.mu.Unlock() - session.logging = &executeLogger{} + session.logging = &executeLogger{ + parser: parser, + } } // GetUDV returns the bind variable value for the user defined variable. @@ -998,7 +1001,7 @@ func (l *executeLogger) log(primitive engine.Primitive, target *querypb.Target, FiredFrom: primitive, }) } - ast, err := sqlparser.Parse(query) + ast, err := l.parser.Parse(query) if err != nil { panic("query not able to parse. this should not happen") } diff --git a/go/vt/vtgate/schema/tracker.go b/go/vt/vtgate/schema/tracker.go index a6d6cafc423..b5622c413c3 100644 --- a/go/vt/vtgate/schema/tracker.go +++ b/go/vt/vtgate/schema/tracker.go @@ -51,6 +51,8 @@ type ( // map of keyspace currently tracked tracked map[keyspaceStr]*updateController consumeDelay time.Duration + + parser *sqlparser.Parser } ) @@ -58,17 +60,18 @@ type ( const defaultConsumeDelay = 1 * time.Second // NewTracker creates the tracker object. 
-func NewTracker(ch chan *discovery.TabletHealth, enableViews bool) *Tracker { +func NewTracker(ch chan *discovery.TabletHealth, enableViews bool, parser *sqlparser.Parser) *Tracker { t := &Tracker{ ctx: context.Background(), ch: ch, tables: &tableMap{m: make(map[keyspaceStr]map[tableNameStr]*vindexes.TableInfo)}, tracked: map[keyspaceStr]*updateController{}, consumeDelay: defaultConsumeDelay, + parser: parser, } if enableViews { - t.views = &viewMap{m: map[keyspaceStr]map[viewNameStr]sqlparser.SelectStatement{}} + t.views = &viewMap{m: map[keyspaceStr]map[viewNameStr]sqlparser.SelectStatement{}, parser: parser} } return t } @@ -290,7 +293,7 @@ func (t *Tracker) updatedTableSchema(th *discovery.TabletHealth) bool { func (t *Tracker) updateTables(keyspace string, res map[string]string) { for tableName, tableDef := range res { - stmt, err := sqlparser.Parse(tableDef) + stmt, err := t.parser.Parse(tableDef) if err != nil { log.Warningf("error parsing table definition for %s: %v", tableName, err) continue @@ -483,7 +486,8 @@ func (t *Tracker) clearKeyspaceTables(ks string) { } type viewMap struct { - m map[keyspaceStr]map[viewNameStr]sqlparser.SelectStatement + m map[keyspaceStr]map[viewNameStr]sqlparser.SelectStatement + parser *sqlparser.Parser } func (vm *viewMap) set(ks, tbl, sql string) { @@ -492,7 +496,7 @@ func (vm *viewMap) set(ks, tbl, sql string) { m = make(map[tableNameStr]sqlparser.SelectStatement) vm.m[ks] = m } - stmt, err := sqlparser.Parse(sql) + stmt, err := vm.parser.Parse(sql) if err != nil { log.Warningf("ignoring view '%s', parsing error in view definition: '%s'", tbl, sql) return diff --git a/go/vt/vtgate/schema/tracker_test.go b/go/vt/vtgate/schema/tracker_test.go index ce2a2d79b56..7b60278cbbf 100644 --- a/go/vt/vtgate/schema/tracker_test.go +++ b/go/vt/vtgate/schema/tracker_test.go @@ -81,7 +81,7 @@ func TestTrackingUnHealthyTablet(t *testing.T) { sbc := sandboxconn.NewSandboxConn(tablet) ch := make(chan *discovery.TabletHealth) - tracker := 
NewTracker(ch, false) + tracker := NewTracker(ch, false, sqlparser.NewTestParser()) tracker.consumeDelay = 1 * time.Millisecond tracker.Start() defer tracker.Stop() @@ -396,7 +396,7 @@ type testCases struct { func testTracker(t *testing.T, schemaDefResult []map[string]string, tcases []testCases) { ch := make(chan *discovery.TabletHealth) - tracker := NewTracker(ch, true) + tracker := NewTracker(ch, true, sqlparser.NewTestParser()) tracker.consumeDelay = 1 * time.Millisecond tracker.Start() defer tracker.Stop() diff --git a/go/vt/vtgate/semantics/FakeSI.go b/go/vt/vtgate/semantics/FakeSI.go index 5a91ece816d..94386590814 100644 --- a/go/vt/vtgate/semantics/FakeSI.go +++ b/go/vt/vtgate/semantics/FakeSI.go @@ -47,7 +47,11 @@ func (s *FakeSI) FindTableOrVindex(tablename sqlparser.TableName) (*vindexes.Tab } func (*FakeSI) ConnCollation() collations.ID { - return 45 + return collations.CollationUtf8mb4ID +} + +func (*FakeSI) CollationEnv() *collations.Environment { + return collations.MySQL8() } func (s *FakeSI) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) { diff --git a/go/vt/vtgate/semantics/analyzer.go b/go/vt/vtgate/semantics/analyzer.go index d7dab3da078..17e9398f7f6 100644 --- a/go/vt/vtgate/semantics/analyzer.go +++ b/go/vt/vtgate/semantics/analyzer.go @@ -50,7 +50,7 @@ func newAnalyzer(dbName string, si SchemaInformation) *analyzer { a := &analyzer{ scoper: s, tables: newTableCollector(s, si, dbName), - typer: newTyper(), + typer: newTyper(si.CollationEnv()), } s.org = a a.tables.org = a @@ -61,6 +61,7 @@ func newAnalyzer(dbName string, si SchemaInformation) *analyzer { scoper: s, binder: b, expandedColumns: map[sqlparser.TableName][]*sqlparser.ColName{}, + collationEnv: si.CollationEnv(), } s.binder = b return a @@ -118,6 +119,7 @@ func (a *analyzer) newSemTable(statement sqlparser.Statement, coll collations.ID Direct: a.binder.direct, ExprTypes: a.typer.m, Tables: a.tables.Tables, + Targets: a.binder.targets, NotSingleRouteErr: 
a.projErr, NotUnshardedErr: a.unshardedErr, Warning: a.warning, diff --git a/go/vt/vtgate/semantics/analyzer_test.go b/go/vt/vtgate/semantics/analyzer_test.go index d27d5a926c6..e222cf619bd 100644 --- a/go/vt/vtgate/semantics/analyzer_test.go +++ b/go/vt/vtgate/semantics/analyzer_test.go @@ -120,7 +120,7 @@ func TestBindingSingleTableNegative(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) st, err := Analyze(parse, "d", &FakeSI{}) require.NoError(t, err) @@ -140,7 +140,7 @@ func TestBindingSingleAliasedTableNegative(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) st, err := Analyze(parse, "", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -238,7 +238,7 @@ func TestBindingMultiTableNegative(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) _, err = Analyze(parse, "d", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -262,7 +262,7 @@ func TestBindingMultiAliasedTableNegative(t *testing.T) { } for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) _, err = Analyze(parse, "d", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -275,6 +275,26 @@ func TestBindingMultiAliasedTableNegative(t *testing.T) { } } +func TestBindingDelete(t *testing.T) { + queries := []string{ + "delete tbl from tbl", + "delete from tbl", + "delete t1 from t1, t2", + } + for _, query := range queries { + t.Run(query, func(t *testing.T) { + stmt, semTable := parseAndAnalyze(t, query, "d") + del := stmt.(*sqlparser.Delete) 
+ t1 := del.TableExprs[0].(*sqlparser.AliasedTableExpr) + ts := semTable.TableSetFor(t1) + assert.Equal(t, SingleTableSet(0), ts) + + actualTs := semTable.Targets[del.Targets[0].Name] + assert.Equal(t, ts, actualTs) + }) + } +} + func TestNotUniqueTableName(t *testing.T) { queries := []string{ "select * from t, t", @@ -285,7 +305,7 @@ func TestNotUniqueTableName(t *testing.T) { for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, _ := sqlparser.Parse(query) + parse, _ := sqlparser.NewTestParser().Parse(query) _, err := Analyze(parse, "test", &FakeSI{}) require.Error(t, err) require.Contains(t, err.Error(), "VT03013: not unique table/alias") @@ -300,7 +320,7 @@ func TestMissingTable(t *testing.T) { for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, _ := sqlparser.Parse(query) + parse, _ := sqlparser.NewTestParser().Parse(query) st, err := Analyze(parse, "", &FakeSI{}) require.NoError(t, err) require.ErrorContains(t, st.NotUnshardedErr, "column 't.col' not found") @@ -388,7 +408,7 @@ func TestUnknownColumnMap2(t *testing.T) { queries := []string{"select col from a, b", "select col from a as user, b as extra"} for _, query := range queries { t.Run(query, func(t *testing.T) { - parse, _ := sqlparser.Parse(query) + parse, _ := sqlparser.NewTestParser().Parse(query) expr := extract(parse.(*sqlparser.Select), 0) for _, test := range tests { @@ -419,7 +439,7 @@ func TestUnknownPredicate(t *testing.T) { Name: sqlparser.NewIdentifierCS("b"), } - parse, _ := sqlparser.Parse(query) + parse, _ := sqlparser.NewTestParser().Parse(query) tests := []struct { name string @@ -457,7 +477,7 @@ func TestScoping(t *testing.T) { } for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -537,7 +557,7 @@ func 
TestSubqueryOrderByBinding(t *testing.T) { for _, tc := range queries { t.Run(tc.query, func(t *testing.T) { - ast, err := sqlparser.Parse(tc.query) + ast, err := sqlparser.NewTestParser().Parse(tc.query) require.NoError(t, err) sel := ast.(*sqlparser.Select) @@ -842,7 +862,7 @@ func TestInvalidQueries(t *testing.T) { for _, tc := range tcases { t.Run(tc.sql, func(t *testing.T) { - parse, err := sqlparser.Parse(tc.sql) + parse, err := sqlparser.NewTestParser().Parse(tc.sql) require.NoError(t, err) st, err := Analyze(parse, "dbName", fakeSchemaInfo()) @@ -961,7 +981,7 @@ func TestScopingWDerivedTables(t *testing.T) { }} for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -1063,7 +1083,7 @@ func TestScopingWithWITH(t *testing.T) { }} for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -1114,7 +1134,7 @@ func TestJoinPredicateDependencies(t *testing.T) { }} for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", fakeSchemaInfo()) @@ -1173,7 +1193,7 @@ func TestDerivedTablesOrderClause(t *testing.T) { si := &FakeSI{Tables: map[string]*vindexes.Table{"t": {Name: sqlparser.NewIdentifierCS("t")}}} for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", si) @@ 
-1207,7 +1227,7 @@ func TestScopingWComplexDerivedTables(t *testing.T) { } for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ Tables: map[string]*vindexes.Table{ @@ -1248,7 +1268,7 @@ func TestScopingWVindexTables(t *testing.T) { } for _, query := range queries { t.Run(query.query, func(t *testing.T) { - parse, err := sqlparser.Parse(query.query) + parse, err := sqlparser.NewTestParser().Parse(query.query) require.NoError(t, err) hash, _ := vindexes.CreateVindex("hash", "user_index", nil) st, err := Analyze(parse, "user", &FakeSI{ @@ -1290,7 +1310,7 @@ func BenchmarkAnalyzeMultipleDifferentQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1314,7 +1334,7 @@ func BenchmarkAnalyzeUnionQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1340,7 +1360,7 @@ func BenchmarkAnalyzeSubQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1370,7 +1390,7 @@ func BenchmarkAnalyzeDerivedTableQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1396,7 +1416,7 @@ func BenchmarkAnalyzeHavingQueries(b *testing.B) { 
for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1425,7 +1445,7 @@ func BenchmarkAnalyzeGroupByQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1448,7 +1468,7 @@ func BenchmarkAnalyzeOrderByQueries(b *testing.B) { for i := 0; i < b.N; i++ { for _, query := range queries { - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(b, err) _, _ = Analyze(parse, "d", fakeSchemaInfo()) @@ -1458,7 +1478,7 @@ func BenchmarkAnalyzeOrderByQueries(b *testing.B) { func parseAndAnalyze(t *testing.T, query, dbName string) (sqlparser.Statement, *SemTable) { t.Helper() - parse, err := sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) semTable, err := Analyze(parse, dbName, fakeSchemaInfo()) @@ -1529,7 +1549,7 @@ func TestNextErrors(t *testing.T) { for _, test := range tests { t.Run(test.query, func(t *testing.T) { - parse, err := sqlparser.Parse(test.query) + parse, err := sqlparser.NewTestParser().Parse(test.query) require.NoError(t, err) _, err = Analyze(parse, "d", fakeSchemaInfo()) @@ -1553,7 +1573,7 @@ func TestUpdateErrors(t *testing.T) { for _, test := range tests { t.Run(test.query, func(t *testing.T) { - parse, err := sqlparser.Parse(test.query) + parse, err := sqlparser.NewTestParser().Parse(test.query) require.NoError(t, err) st, err := Analyze(parse, "d", fakeSchemaInfo()) @@ -1571,7 +1591,7 @@ func TestUpdateErrors(t *testing.T) { func TestScopingSubQueryJoinClause(t *testing.T) { query := "select (select 1 from u1 join u2 on u1.id = u2.id and u2.id = u3.id) x from u3" - parse, err := 
sqlparser.Parse(query) + parse, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) st, err := Analyze(parse, "user", &FakeSI{ diff --git a/go/vt/vtgate/semantics/binder.go b/go/vt/vtgate/semantics/binder.go index 27d059673cb..33422c3aa37 100644 --- a/go/vt/vtgate/semantics/binder.go +++ b/go/vt/vtgate/semantics/binder.go @@ -20,6 +20,8 @@ import ( "strings" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/evalengine" ) // binder is responsible for finding all the column references in @@ -29,6 +31,7 @@ import ( type binder struct { recursive ExprDependencies direct ExprDependencies + targets map[sqlparser.IdentifierCS]TableSet scoper *scoper tc *tableCollector org originable @@ -44,6 +47,7 @@ func newBinder(scoper *scoper, org originable, tc *tableCollector, typer *typer) return &binder{ recursive: map[sqlparser.Expr]TableSet{}, direct: map[sqlparser.Expr]TableSet{}, + targets: map[sqlparser.IdentifierCS]TableSet{}, scoper: scoper, org: org, tc: tc, @@ -106,10 +110,47 @@ func (b *binder) up(cursor *sqlparser.Cursor) error { b.typer.m[ae.Expr] = t } } + case sqlparser.TableNames: + _, isDelete := cursor.Parent().(*sqlparser.Delete) + if !isDelete { + return nil + } + current := b.scoper.currentScope() + for _, target := range node { + finalDep, err := b.findDependentTableSet(current, target) + if err != nil { + return err + } + b.targets[target.Name] = finalDep.direct + } } return nil } +func (b *binder) findDependentTableSet(current *scope, target sqlparser.TableName) (dependency, error) { + var deps dependencies = ¬hing{} + for _, table := range current.tables { + tblName, err := table.Name() + if err != nil { + continue + } + if tblName.Name.String() != target.Name.String() { + continue + } + ts := b.org.tableSetFor(table.GetAliasedTableExpr()) + c := createCertain(ts, ts, evalengine.Type{}) + deps = deps.merge(c, false) + } + finalDep, err := deps.get() + if err != nil { + return 
dependency{}, err + } + if finalDep.direct != finalDep.recursive { + return dependency{}, vterrors.VT03004(target.Name.String()) + } + return finalDep, nil +} + func (b *binder) bindCountStar(node *sqlparser.CountStar) { scope := b.scoper.currentScope() var ts TableSet diff --git a/go/vt/vtgate/semantics/derived_table.go b/go/vt/vtgate/semantics/derived_table.go index fd649436ab0..0425d78ed93 100644 --- a/go/vt/vtgate/semantics/derived_table.go +++ b/go/vt/vtgate/semantics/derived_table.go @@ -141,7 +141,7 @@ func (dt *DerivedTable) Name() (sqlparser.TableName, error) { return dt.ASTNode.TableName() } -func (dt *DerivedTable) getAliasedTableExpr() *sqlparser.AliasedTableExpr { +func (dt *DerivedTable) GetAliasedTableExpr() *sqlparser.AliasedTableExpr { return dt.ASTNode } diff --git a/go/vt/vtgate/semantics/early_rewriter.go b/go/vt/vtgate/semantics/early_rewriter.go index 1a957c8ae60..3c1235dd376 100644 --- a/go/vt/vtgate/semantics/early_rewriter.go +++ b/go/vt/vtgate/semantics/early_rewriter.go @@ -33,6 +33,7 @@ type earlyRewriter struct { clause string warning string expandedColumns map[sqlparser.TableName][]*sqlparser.ColName + collationEnv *collations.Environment } func (r *earlyRewriter) down(cursor *sqlparser.Cursor) error { @@ -46,9 +47,9 @@ func (r *earlyRewriter) down(cursor *sqlparser.Cursor) error { case sqlparser.OrderBy: handleOrderBy(r, cursor, node) case *sqlparser.OrExpr: - rewriteOrExpr(cursor, node) + rewriteOrExpr(cursor, node, r.collationEnv) case *sqlparser.AndExpr: - rewriteAndExpr(cursor, node) + rewriteAndExpr(cursor, node, r.collationEnv) case *sqlparser.NotExpr: rewriteNotExpr(cursor, node) case sqlparser.GroupBy: @@ -63,6 +64,21 @@ func (r *earlyRewriter) down(cursor *sqlparser.Cursor) error { return r.handleWith(node) case *sqlparser.AliasedTableExpr: return r.handleAliasedTable(node) + case *sqlparser.Delete: + // When we do not have any target, it is a single table delete. 
+ // In a single table delete, the table reference is always a single aliased table expression. + if len(node.Targets) != 0 { + return nil + } + tblExpr, ok := node.TableExprs[0].(*sqlparser.AliasedTableExpr) + if !ok { + return nil + } + tblName, err := tblExpr.TableName() + if err != nil { + return err + } + node.Targets = append(node.Targets, tblName) } return nil } @@ -176,36 +192,40 @@ func handleOrderBy(r *earlyRewriter, cursor *sqlparser.Cursor, node sqlparser.Or } // rewriteOrExpr rewrites OR expressions when the right side is FALSE. -func rewriteOrExpr(cursor *sqlparser.Cursor, node *sqlparser.OrExpr) { - newNode := rewriteOrFalse(*node) +func rewriteOrExpr(cursor *sqlparser.Cursor, node *sqlparser.OrExpr, collationEnv *collations.Environment) { + newNode := rewriteOrFalse(*node, collationEnv) if newNode != nil { cursor.ReplaceAndRevisit(newNode) } } // rewriteAndExpr rewrites AND expressions when either side is TRUE. -func rewriteAndExpr(cursor *sqlparser.Cursor, node *sqlparser.AndExpr) { - newNode := rewriteAndTrue(*node) +func rewriteAndExpr(cursor *sqlparser.Cursor, node *sqlparser.AndExpr, collationEnv *collations.Environment) { + newNode := rewriteAndTrue(*node, collationEnv) if newNode != nil { cursor.ReplaceAndRevisit(newNode) } } -func rewriteAndTrue(andExpr sqlparser.AndExpr) sqlparser.Expr { +func rewriteAndTrue(andExpr sqlparser.AndExpr, collationEnv *collations.Environment) sqlparser.Expr { // we are looking for the pattern `WHERE c = 1 AND 1 = 1` isTrue := func(subExpr sqlparser.Expr) bool { - evalEnginePred, err := evalengine.Translate(subExpr, nil) + coll := collationEnv.DefaultConnectionCharset() + evalEnginePred, err := evalengine.Translate(subExpr, &evalengine.Config{ + CollationEnv: collationEnv, + Collation: coll, + }) if err != nil { return false } - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(collationEnv) res, err := env.Evaluate(evalEnginePred) if err != nil { return false } - boolValue, err :=
res.Value(collations.Default()).ToBool() + boolValue, err := res.Value(coll).ToBool() if err != nil { return false } @@ -419,21 +439,25 @@ func realCloneOfColNames(expr sqlparser.Expr, union bool) sqlparser.Expr { }, nil).(sqlparser.Expr) } -func rewriteOrFalse(orExpr sqlparser.OrExpr) sqlparser.Expr { +func rewriteOrFalse(orExpr sqlparser.OrExpr, collationEnv *collations.Environment) sqlparser.Expr { // we are looking for the pattern `WHERE c = 1 OR 1 = 0` isFalse := func(subExpr sqlparser.Expr) bool { - evalEnginePred, err := evalengine.Translate(subExpr, nil) + coll := collationEnv.DefaultConnectionCharset() + evalEnginePred, err := evalengine.Translate(subExpr, &evalengine.Config{ + CollationEnv: collationEnv, + Collation: coll, + }) if err != nil { return false } - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(collationEnv) res, err := env.Evaluate(evalEnginePred) if err != nil { return false } - boolValue, err := res.Value(collations.Default()).ToBool() + boolValue, err := res.Value(coll).ToBool() if err != nil { return false } diff --git a/go/vt/vtgate/semantics/early_rewriter_test.go b/go/vt/vtgate/semantics/early_rewriter_test.go index bf09d2d5cc3..476f993f3d7 100644 --- a/go/vt/vtgate/semantics/early_rewriter_test.go +++ b/go/vt/vtgate/semantics/early_rewriter_test.go @@ -187,7 +187,7 @@ func TestExpandStar(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement, isSelectStatement := ast.(*sqlparser.Select) require.True(t, isSelectStatement, "analyzer expects a select statement") @@ -288,7 +288,7 @@ func TestRewriteJoinUsingColumns(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement, 
isSelectStatement := ast.(*sqlparser.Select) require.True(t, isSelectStatement, "analyzer expects a select statement") @@ -346,7 +346,7 @@ func TestOrderByGroupByLiteral(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement := ast.(*sqlparser.Select) _, err = Analyze(selectStatement, cDB, schemaInfo) @@ -381,7 +381,7 @@ func TestHavingAndOrderByColumnName(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement := ast.(*sqlparser.Select) _, err = Analyze(selectStatement, cDB, schemaInfo) @@ -426,7 +426,7 @@ func TestSemTableDependenciesAfterExpandStar(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement, isSelectStatement := ast.(*sqlparser.Select) require.True(t, isSelectStatement, "analyzer expects a select statement") @@ -486,7 +486,7 @@ func TestRewriteNot(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) selectStatement, isSelectStatement := ast.(*sqlparser.Select) require.True(t, isSelectStatement, "analyzer expects a select statement") @@ -538,7 +538,7 @@ func TestConstantFolding(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) _, err = Analyze(ast, cDB, schemaInfo) require.NoError(t, err) @@ -565,7 +565,7 @@ func 
TestCTEToDerivedTableRewrite(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.sql, func(t *testing.T) { - ast, err := sqlparser.Parse(tcase.sql) + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) require.NoError(t, err) _, err = Analyze(ast, cDB, fakeSchemaInfo()) require.NoError(t, err) @@ -573,3 +573,33 @@ func TestCTEToDerivedTableRewrite(t *testing.T) { }) } } + +// TestDeleteTargetTableRewrite checks that delete target rewrite is done correctly. +func TestDeleteTargetTableRewrite(t *testing.T) { + cDB := "db" + tcases := []struct { + sql string + target string + }{{ + sql: "delete from t", + target: "t", + }, { + sql: "delete from t t1", + target: "t1", + }, { + sql: "delete t2 from t t1, t t2", + target: "t2", + }, { + sql: "delete t2,t1 from t t1, t t2", + target: "t2, t1", + }} + for _, tcase := range tcases { + t.Run(tcase.sql, func(t *testing.T) { + ast, err := sqlparser.NewTestParser().Parse(tcase.sql) + require.NoError(t, err) + _, err = Analyze(ast, cDB, fakeSchemaInfo()) + require.NoError(t, err) + require.Equal(t, tcase.target, sqlparser.String(ast.(*sqlparser.Delete).Targets)) + }) + } +} diff --git a/go/vt/vtgate/semantics/info_schema.go b/go/vt/vtgate/semantics/info_schema.go index 76a383b5ac0..66315937174 100644 --- a/go/vt/vtgate/semantics/info_schema.go +++ b/go/vt/vtgate/semantics/info_schema.go @@ -30,11 +30,11 @@ import ( "vitess.io/vitess/go/vt/vtgate/vindexes" ) -func createCol(name string, typ int, collation string, def string, size, scale int32, notNullable bool, values string) vindexes.Column { +func createCol(parser *sqlparser.Parser, name string, typ int, collation string, def string, size, scale int32, notNullable bool, values string) vindexes.Column { var expr sqlparser.Expr if def != "" { var err error - expr, err = sqlparser.ParseExpr(def) + expr, err = parser.ParseExpr(def) if err != nil { panic(fmt.Sprintf("Failed to parse %q: %v", def, err)) } @@ -66,628 +66,632 @@ func createCol(name string, typ int, 
collation string, def string, size, scale i // getInfoSchema57 returns a map of all information_schema tables and their columns with types // To recreate this information from MySQL, you can run the test in info_schema_gen_test.go func getInfoSchema57() map[string][]vindexes.Column { + parser, err := sqlparser.New(sqlparser.Options{MySQLServerVersion: "5.7.9"}) + if err != nil { + panic(err) + } infSchema := map[string][]vindexes.Column{} var cols []vindexes.Column - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("DEFAULT_COLLATE_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("DESCRIPTION", 6165, "utf8mb3_general_ci", "", 60, 0, true, "")) - cols = append(cols, createCol("MAXLEN", 265, "utf8mb3_general_ci", "0", 3, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_COLLATE_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DESCRIPTION", 6165, "utf8mb3_general_ci", "", 60, 0, true, "")) + cols = append(cols, createCol(parser, "MAXLEN", 265, "utf8mb3_general_ci", "0", 3, 0, true, "")) infSchema["CHARACTER_SETS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["COLLATION_CHARACTER_SET_APPLICABILITY"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, 
createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("ID", 265, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("IS_COMPILED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("SORTLEN", 265, "utf8mb3_general_ci", "0", 3, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "ID", 265, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_COMPILED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SORTLEN", 265, "utf8mb3_general_ci", "0", 3, 0, true, "")) infSchema["COLLATIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, 
"utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["COLUMN_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ORDINAL_POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("COLUMN_DEFAULT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("IS_NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NUMERIC_SCALE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 
6165, "utf8mb3_general_ci", "", 32, 0, false, "")) - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) - cols = append(cols, createCol("COLUMN_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COLUMN_KEY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("EXTRA", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) - cols = append(cols, createCol("PRIVILEGES", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("COLUMN_COMMENT", 6165, "utf8mb3_general_ci", "", 1024, 0, true, "")) - cols = append(cols, createCol("GENERATION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_DEFAULT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "IS_NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 265, 
"utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_KEY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "EXTRA", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGES", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_COMMENT", 6165, "utf8mb3_general_ci", "", 1024, 0, true, "")) + cols = append(cols, createCol(parser, "GENERATION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["COLUMNS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SUPPORT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("TRANSACTIONS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("XA", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("SAVEPOINTS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SUPPORT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "TRANSACTIONS", 6165, 
"utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "XA", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "SAVEPOINTS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) infSchema["ENGINES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("EVENT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) - cols = append(cols, createCol("TIME_ZONE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("EVENT_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("EVENT_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) - cols = append(cols, createCol("EXECUTE_AT", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("INTERVAL_VALUE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("INTERVAL_FIELD", 6165, "utf8mb3_general_ci", "", 18, 0, false, "")) - cols = append(cols, createCol("SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) - cols = append(cols, createCol("STARTS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ENDS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("STATUS", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) - cols = append(cols, createCol("ON_COMPLETION", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("CREATED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_ALTERED", 2064, 
"utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_EXECUTED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("EVENT_COMMENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ORIGINATOR", 265, "utf8mb3_general_ci", "0", 10, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) + cols = append(cols, createCol(parser, "TIME_ZONE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "EXECUTE_AT", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INTERVAL_VALUE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "INTERVAL_FIELD", 6165, "utf8mb3_general_ci", "", 18, 0, false, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) + cols = append(cols, createCol(parser, "STARTS", 2064, "utf8mb3_general_ci", "", 
0, 0, false, "")) + cols = append(cols, createCol(parser, "ENDS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) + cols = append(cols, createCol(parser, "ON_COMPLETION", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_ALTERED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_EXECUTED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_COMMENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORIGINATOR", 265, "utf8mb3_general_ci", "0", 10, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["EVENTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("FILE_ID", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) - cols = append(cols, createCol("FILE_NAME", 6165, "utf8mb3_general_ci", "", 4000, 0, false, "")) - cols = append(cols, createCol("FILE_TYPE", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165, 
"utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("LOGFILE_GROUP_NUMBER", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("FULLTEXT_KEYS", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DELETED_ROWS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("UPDATE_COUNT", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("FREE_EXTENTS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("TOTAL_EXTENTS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("EXTENT_SIZE", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) - cols = append(cols, createCol("INITIAL_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CREATION_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("LAST_UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("LAST_ACCESS_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("RECOVER_TIME", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("TRANSACTION_COUNTER", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) - cols = append(cols, createCol("VERSION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("TABLE_ROWS", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "", 21, 
0, false, "")) - cols = append(cols, createCol("DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("INDEX_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATA_FREE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("STATUS", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) - cols = append(cols, createCol("EXTRA", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "FILE_ID", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) + cols = append(cols, createCol(parser, "FILE_NAME", 6165, "utf8mb3_general_ci", "", 4000, 0, false, "")) + cols = append(cols, createCol(parser, "FILE_TYPE", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NUMBER", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, 
"utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "FULLTEXT_KEYS", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DELETED_ROWS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_COUNT", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_EXTENTS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "TOTAL_EXTENTS", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "EXTENT_SIZE", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) + cols = append(cols, createCol(parser, "INITIAL_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CREATION_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LAST_UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LAST_ACCESS_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "RECOVER_TIME", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "TRANSACTION_COUNTER", 265, "utf8mb3_general_ci", "", 4, 0, false, "")) + cols = append(cols, createCol(parser, "VERSION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, 
"DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "EXTRA", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) infSchema["FILES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("VARIABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("VARIABLE_VALUE", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "VARIABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "VARIABLE_VALUE", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) infSchema["GLOBAL_STATUS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("VARIABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("VARIABLE_VALUE", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "VARIABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "VARIABLE_VALUE", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) infSchema["GLOBAL_VARIABLES"] = cols cols = 
[]vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("BLOCK_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("SPACE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGE_NUMBER", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("FLUSH_TYPE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("FIX_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("NEWEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("OLDEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("ACCESS_TIME", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("NUMBER_RECORDS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DATA_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("COMPRESSED_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGE_STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("FREE_PAGE_CLOCK", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, 
createCol(parser, "POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "BLOCK_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NUMBER", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "FLUSH_TYPE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "FIX_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "NEWEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "OLDEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "ACCESS_TIME", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "NUMBER_RECORDS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, 
createCol(parser, "FREE_PAGE_CLOCK", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_BUFFER_PAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("LRU_POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("SPACE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGE_NUMBER", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("FLUSH_TYPE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("FIX_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("NEWEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("OLDEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("ACCESS_TIME", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("NUMBER_RECORDS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DATA_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("COMPRESSED_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("COMPRESSED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - 
cols = append(cols, createCol("FREE_PAGE_CLOCK", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NUMBER", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "FLUSH_TYPE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "FIX_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "NEWEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "OLDEST_MODIFICATION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "ACCESS_TIME", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "NUMBER_RECORDS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + 
cols = append(cols, createCol(parser, "IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_PAGE_CLOCK", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_BUFFER_PAGE_LRU"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("POOL_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("FREE_BUFFERS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("OLD_DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("MODIFIED_DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PENDING_DECOMPRESS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PENDING_READS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PENDING_FLUSH_LRU", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PENDING_FLUSH_LIST", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGES_MADE_YOUNG", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGES_NOT_MADE_YOUNG", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGES_MADE_YOUNG_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_MADE_NOT_YOUNG_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_READ", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_CREATED", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_WRITTEN", 265, 
"utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PAGES_READ_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_CREATE_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_WRITTEN_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_GET", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("HIT_RATE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("YOUNG_MAKE_PER_THOUSAND_GETS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_READ_AHEAD", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("NUMBER_READ_AHEAD_EVICTED", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("READ_AHEAD_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("READ_AHEAD_EVICTED_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("LRU_IO_TOTAL", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("LRU_IO_CURRENT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("UNCOMPRESS_TOTAL", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("UNCOMPRESS_CURRENT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_SIZE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "FREE_BUFFERS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_PAGES", 265, 
"utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "OLD_DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "MODIFIED_DATABASE_PAGES", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_DECOMPRESS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_READS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_FLUSH_LRU", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_FLUSH_LIST", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_YOUNG", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_NOT_MADE_YOUNG", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_YOUNG_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_NOT_YOUNG_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_READ", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_CREATED", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_WRITTEN", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_READ_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_CREATE_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_WRITTEN_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_GET", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, 
"HIT_RATE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "YOUNG_MAKE_PER_THOUSAND_GETS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_READ_AHEAD", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_READ_AHEAD_EVICTED", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "READ_AHEAD_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "READ_AHEAD_EVICTED_RATE", 1036, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_IO_TOTAL", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_IO_CURRENT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "UNCOMPRESS_TOTAL", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "UNCOMPRESS_CURRENT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_BUFFER_POOL_STATS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) + cols = append(cols, 
createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) infSchema["INNODB_CMP"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, 
true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) infSchema["INNODB_CMP_PER_INDEX"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) infSchema["INNODB_CMP_PER_INDEX_RESET"] = cols cols = 
[]vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) infSchema["INNODB_CMP_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) - cols = append(cols, createCol("buffer_pool_instance", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("pages_used", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("pages_free", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("relocation_ops", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("relocation_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "0", 5, 
0, true, "")) + cols = append(cols, createCol(parser, "buffer_pool_instance", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "pages_used", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "pages_free", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_ops", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) infSchema["INNODB_CMPMEM"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) - cols = append(cols, createCol("buffer_pool_instance", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("pages_used", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("pages_free", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("relocation_ops", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("relocation_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "0", 5, 0, true, "")) + cols = append(cols, createCol(parser, "buffer_pool_instance", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "pages_used", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "pages_free", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_ops", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_time", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) infSchema["INNODB_CMPMEM_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, 
true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_FT_BEING_DELETED"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("KEY", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("VALUE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "KEY", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "VALUE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) infSchema["INNODB_FT_CONFIG"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("value", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) + cols = append(cols, createCol(parser, "value", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) infSchema["INNODB_FT_DEFAULT_STOPWORD"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_FT_DELETED"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) - cols = append(cols, createCol("FIRST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("LAST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DOC_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) + cols = append(cols, createCol(parser, "FIRST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_DOC_ID", 265, 
"utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_FT_INDEX_CACHE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) - cols = append(cols, createCol("FIRST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("LAST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DOC_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) + cols = append(cols, createCol(parser, "FIRST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) infSchema["INNODB_FT_INDEX_TABLE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("SUBSYSTEM", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("MAX_COUNT", 265, 
"utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("MIN_COUNT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AVG_COUNT", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("COUNT_RESET", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("MAX_COUNT_RESET", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("MIN_COUNT_RESET", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AVG_COUNT_RESET", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TIME_ENABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TIME_DISABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TIME_ELAPSED", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("TIME_RESET", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("STATUS", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("TYPE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("COMMENT", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "SUBSYSTEM", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "COUNT", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_COUNT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MIN_COUNT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_COUNT", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "COUNT_RESET", 265, "utf8mb3_general_ci", 
"0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_COUNT_RESET", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MIN_COUNT_RESET", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_COUNT_RESET", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_ENABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_DISABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_ELAPSED", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_RESET", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "TYPE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) infSchema["INNODB_METRICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 202, 0, false, "")) - cols = append(cols, createCol("N_COLS", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("SPACE", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) - cols = append(cols, createCol("PER_TABLE_TABLESPACE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IS_COMPRESSED", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 202, 0, false, "")) + cols = append(cols, createCol(parser, "N_COLS", 263, "utf8mb3_general_ci", "0", 
11, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 263, "utf8mb3_general_ci", "0", 11, 0, true, "")) + cols = append(cols, createCol(parser, "PER_TABLE_TABLESPACE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_COMPRESSED", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["INNODB_TEMP_TABLE_INFO"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("trx_id", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) - cols = append(cols, createCol("trx_state", 6165, "utf8mb3_general_ci", "", 13, 0, true, "")) - cols = append(cols, createCol("trx_started", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_requested_lock_id", 6165, "utf8mb3_general_ci", "", 81, 0, false, "")) - cols = append(cols, createCol("trx_wait_started", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("trx_weight", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_mysql_thread_id", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_query", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("trx_operation_state", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("trx_tables_in_use", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_tables_locked", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_lock_structs", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_lock_memory_bytes", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_rows_locked", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_rows_modified", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_concurrency_tickets", 265, 
"utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_isolation_level", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) - cols = append(cols, createCol("trx_unique_checks", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) - cols = append(cols, createCol("trx_foreign_key_checks", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) - cols = append(cols, createCol("trx_last_foreign_key_error", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("trx_adaptive_hash_latched", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) - cols = append(cols, createCol("trx_adaptive_hash_timeout", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("trx_is_read_only", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) - cols = append(cols, createCol("trx_autocommit_non_locking", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_id", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) + cols = append(cols, createCol(parser, "trx_state", 6165, "utf8mb3_general_ci", "", 13, 0, true, "")) + cols = append(cols, createCol(parser, "trx_started", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_requested_lock_id", 6165, "utf8mb3_general_ci", "", 81, 0, false, "")) + cols = append(cols, createCol(parser, "trx_wait_started", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "trx_weight", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_mysql_thread_id", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_query", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "trx_operation_state", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "trx_tables_in_use", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = 
append(cols, createCol(parser, "trx_tables_locked", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_lock_structs", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_lock_memory_bytes", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_rows_locked", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_rows_modified", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_concurrency_tickets", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_isolation_level", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "trx_unique_checks", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_foreign_key_checks", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_last_foreign_key_error", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "trx_adaptive_hash_latched", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_adaptive_hash_timeout", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "trx_is_read_only", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "trx_autocommit_non_locking", 263, "utf8mb3_general_ci", "0", 1, 0, true, "")) infSchema["INNODB_TRX"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, 
createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ORDINAL_POSITION", 265, "utf8mb3_general_ci", "0", 10, 0, true, "")) - cols = append(cols, createCol("POSITION_IN_UNIQUE_CONSTRAINT", 265, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("REFERENCED_TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("REFERENCED_COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 265, "utf8mb3_general_ci", "0", 10, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION_IN_UNIQUE_CONSTRAINT", 265, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_SCHEMA", 6165, "utf8mb3_general_ci", 
"", 64, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["KEY_COLUMN_USAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("QUERY", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("TRACE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) - cols = append(cols, createCol("INSUFFICIENT_PRIVILEGES", 257, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "QUERY", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TRACE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) + cols = append(cols, createCol(parser, "INSUFFICIENT_PRIVILEGES", 257, "utf8mb3_general_ci", "0", 1, 0, true, "")) infSchema["OPTIMIZER_TRACE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ORDINAL_POSITION", 263, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("PARAMETER_MODE", 6165, "utf8mb3_general_ci", "", 5, 0, false, "")) - cols = append(cols, createCol("PARAMETER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 263, 
"utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NUMERIC_SCALE", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ROUTINE_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 263, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "PARAMETER_MODE", 6165, "utf8mb3_general_ci", "", 5, 0, false, "")) + cols = append(cols, createCol(parser, "PARAMETER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, 
createCol(parser, "NUMERIC_SCALE", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) infSchema["PARAMETERS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PARTITION_ORDINAL_POSITION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_ORDINAL_POSITION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("PARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 18, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 12, 0, false, "")) - cols = append(cols, createCol("PARTITION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PARTITION_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TABLE_ROWS", 265, 
"utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DATA_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("INDEX_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("DATA_FREE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("PARTITION_COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("NODEGROUP", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_ORDINAL_POSITION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_ORDINAL_POSITION", 265, "utf8mb3_general_ci", "", 21, 0, false, 
"")) + cols = append(cols, createCol(parser, "PARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 18, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 12, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "NODEGROUP", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["PARTITIONS"] = cols cols = 
[]vindexes.Column{} - cols = append(cols, createCol("PLUGIN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PLUGIN_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) - cols = append(cols, createCol("PLUGIN_STATUS", 6165, "utf8mb3_general_ci", "", 10, 0, true, "")) - cols = append(cols, createCol("PLUGIN_TYPE", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("PLUGIN_TYPE_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) - cols = append(cols, createCol("PLUGIN_LIBRARY", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PLUGIN_LIBRARY_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("PLUGIN_AUTHOR", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PLUGIN_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PLUGIN_LICENSE", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) - cols = append(cols, createCol("LOAD_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_STATUS", 6165, "utf8mb3_general_ci", "", 10, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_TYPE", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_TYPE_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_LIBRARY", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_LIBRARY_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_AUTHOR", 6165, "utf8mb3_general_ci", "", 64, 0, false, 
"")) + cols = append(cols, createCol(parser, "PLUGIN_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_LICENSE", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) + cols = append(cols, createCol(parser, "LOAD_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["PLUGINS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) - cols = append(cols, createCol("USER", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("HOST", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DB", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COMMAND", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) - cols = append(cols, createCol("TIME", 263, "utf8mb3_general_ci", "0", 7, 0, true, "")) - cols = append(cols, createCol("STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("INFO", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ID", 265, "utf8mb3_general_ci", "0", 21, 0, true, "")) + cols = append(cols, createCol(parser, "USER", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "HOST", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DB", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COMMAND", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "TIME", 263, "utf8mb3_general_ci", "0", 7, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "INFO", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["PROCESSLIST"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("QUERY_ID", 263, 
"utf8mb3_general_ci", "0", 20, 0, true, "")) - cols = append(cols, createCol("SEQ", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) - cols = append(cols, createCol("STATE", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) - cols = append(cols, createCol("DURATION", 18, "utf8mb3_general_ci", "0.000000", 9, 6, true, "")) - cols = append(cols, createCol("CPU_USER", 18, "utf8mb3_general_ci", "", 9, 6, false, "")) - cols = append(cols, createCol("CPU_SYSTEM", 18, "utf8mb3_general_ci", "", 9, 6, false, "")) - cols = append(cols, createCol("CONTEXT_VOLUNTARY", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("CONTEXT_INVOLUNTARY", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("BLOCK_OPS_IN", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("BLOCK_OPS_OUT", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("MESSAGES_SENT", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("MESSAGES_RECEIVED", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("PAGE_FAULTS_MAJOR", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("PAGE_FAULTS_MINOR", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("SWAPS", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("SOURCE_FUNCTION", 6165, "utf8mb3_general_ci", "", 30, 0, false, "")) - cols = append(cols, createCol("SOURCE_FILE", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("SOURCE_LINE", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "QUERY_ID", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) + cols = append(cols, createCol(parser, "SEQ", 263, "utf8mb3_general_ci", "0", 20, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 30, 0, 
true, "")) + cols = append(cols, createCol(parser, "DURATION", 18, "utf8mb3_general_ci", "0.000000", 9, 6, true, "")) + cols = append(cols, createCol(parser, "CPU_USER", 18, "utf8mb3_general_ci", "", 9, 6, false, "")) + cols = append(cols, createCol(parser, "CPU_SYSTEM", 18, "utf8mb3_general_ci", "", 9, 6, false, "")) + cols = append(cols, createCol(parser, "CONTEXT_VOLUNTARY", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "CONTEXT_INVOLUNTARY", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "BLOCK_OPS_IN", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "BLOCK_OPS_OUT", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "MESSAGES_SENT", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "MESSAGES_RECEIVED", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_FAULTS_MAJOR", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_FAULTS_MINOR", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "SWAPS", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_FUNCTION", 6165, "utf8mb3_general_ci", "", 30, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_FILE", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_LINE", 263, "utf8mb3_general_ci", "", 20, 0, false, "")) infSchema["PROFILING"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = 
append(cols, createCol("UNIQUE_CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("MATCH_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("UPDATE_RULE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DELETE_RULE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "MATCH_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UPDATE_RULE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DELETE_RULE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_NAME", 6165, 
"utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["REFERENTIAL_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("ROUTINE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) - cols = append(cols, createCol("DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NUMERIC_SCALE", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ROUTINE_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("ROUTINE_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("EXTERNAL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EXTERNAL_LANGUAGE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PARAMETER_STYLE", 6165, 
"utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("IS_DETERMINISTIC", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("SQL_DATA_ACCESS", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SQL_PATH", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) - cols = append(cols, createCol("CREATED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_ALTERED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) - cols = append(cols, createCol("ROUTINE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = 
append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 263, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ROUTINE_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EXTERNAL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EXTERNAL_LANGUAGE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PARAMETER_STYLE", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DETERMINISTIC", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_DATA_ACCESS", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_PATH", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_ALTERED", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 
6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["ROUTINES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["SCHEMA_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DEFAULT_CHARACTER_SET_NAME", 
6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("DEFAULT_COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("SQL_PATH", 6165, "utf8mb3_general_ci", "", 512, 0, false, "")) + cols = append(cols, createCol(parser, "CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_PATH", 6165, "utf8mb3_general_ci", "", 512, 0, false, "")) infSchema["SCHEMATA"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("NON_UNIQUE", 265, "utf8mb3_general_ci", "0", 1, 0, true, "")) - cols = append(cols, createCol("INDEX_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("INDEX_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SEQ_IN_INDEX", 265, "utf8mb3_general_ci", "0", 2, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLLATION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) - cols = append(cols, createCol("CARDINALITY", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("SUB_PART", 265, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("PACKED", 6165, "utf8mb3_general_ci", "", 
10, 0, false, "")) - cols = append(cols, createCol("NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("INDEX_TYPE", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) - cols = append(cols, createCol("COMMENT", 6165, "utf8mb3_general_ci", "", 16, 0, false, "")) - cols = append(cols, createCol("INDEX_COMMENT", 6165, "utf8mb3_general_ci", "", 1024, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "NON_UNIQUE", 265, "utf8mb3_general_ci", "0", 1, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SEQ_IN_INDEX", 265, "utf8mb3_general_ci", "0", 2, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) + cols = append(cols, createCol(parser, "CARDINALITY", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "SUB_PART", 265, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "PACKED", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_TYPE", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 16, 0, false, "")) + cols = append(cols, 
createCol(parser, "INDEX_COMMENT", 6165, "utf8mb3_general_ci", "", 1024, 0, true, "")) infSchema["STATISTICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["TABLE_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, 
createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["TABLE_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("VERSION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("TABLE_ROWS", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("INDEX_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("DATA_FREE", 265, 
"utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AUTO_INCREMENT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TABLE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) - cols = append(cols, createCol("CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("CREATE_OPTIONS", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) - cols = append(cols, createCol("TABLE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VERSION", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = 
append(cols, createCol(parser, "INDEX_LENGTH", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AUTO_INCREMENT", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_OPTIONS", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) infSchema["TABLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLESPACE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EXTENT_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("NODEGROUP_ID", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) - cols = append(cols, createCol("TABLESPACE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, 
false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLESPACE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EXTENT_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "NODEGROUP_ID", 265, "utf8mb3_general_ci", "", 21, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) infSchema["TABLESPACES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TRIGGER_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TRIGGER_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TRIGGER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_MANIPULATION", 6165, "utf8mb3_general_ci", "", 6, 0, true, "")) - cols = append(cols, createCol("EVENT_OBJECT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("EVENT_OBJECT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_OBJECT_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ACTION_ORDER", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) - cols = append(cols, createCol("ACTION_CONDITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, 
createCol("ACTION_STATEMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ACTION_ORIENTATION", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) - cols = append(cols, createCol("ACTION_TIMING", 6165, "utf8mb3_general_ci", "", 6, 0, true, "")) - cols = append(cols, createCol("ACTION_REFERENCE_OLD_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ACTION_REFERENCE_NEW_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ACTION_REFERENCE_OLD_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("ACTION_REFERENCE_NEW_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("CREATED", 2064, "utf8mb3_general_ci", "", 2, 0, false, "")) - cols = append(cols, createCol("SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "TRIGGER_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TRIGGER_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TRIGGER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_MANIPULATION", 6165, "utf8mb3_general_ci", "", 6, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = 
append(cols, createCol(parser, "EVENT_OBJECT_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_ORDER", 265, "utf8mb3_general_ci", "0", 4, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_CONDITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_STATEMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_ORIENTATION", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_TIMING", 6165, "utf8mb3_general_ci", "", 6, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_OLD_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_NEW_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_OLD_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_NEW_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2064, "utf8mb3_general_ci", "", 2, 0, false, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 6165, "utf8mb3_general_ci", "", 8192, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["TRIGGERS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 
512, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 81, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["USER_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("VIEW_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("CHECK_OPTION", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("IS_UPDATABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) - cols = append(cols, createCol("SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, 
true, "")) + cols = append(cols, createCol(parser, "VIEW_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "CHECK_OPTION", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "IS_UPDATABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 93, 0, true, "")) + cols = append(cols, createCol(parser, "SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) infSchema["VIEWS"] = cols return infSchema } @@ -695,897 +699,901 @@ func getInfoSchema57() map[string][]vindexes.Column { // getInfoSchema80 returns a map of all information_schema tables and their columns with types // To recreate this information from MySQL, you can run the test in info_schema_gen_test.go func getInfoSchema80() map[string][]vindexes.Column { + parser, err := sqlparser.New(sqlparser.Options{MySQLServerVersion: "8.0.30"}) + if err != nil { + panic(err) + } infSchema := map[string][]vindexes.Column{} var cols []vindexes.Column - cols = append(cols, createCol("USER", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("GRANTEE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) - cols = append(cols, createCol("ROLE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = 
append(cols, createCol("IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "USER", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ADMINISTRABLE_ROLE_AUTHORIZATIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("USER", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("GRANTEE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) - cols = append(cols, createCol("ROLE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, 
createCol("IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "USER", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["APPLICABLE_ROLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DEFAULT_COLLATE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DESCRIPTION", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) - cols = append(cols, createCol("MAXLEN", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_COLLATE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DESCRIPTION", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) + cols = append(cols, createCol(parser, "MAXLEN", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["CHARACTER_SETS"] = cols cols = []vindexes.Column{} - cols = append(cols, 
createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CHECK_CLAUSE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHECK_CLAUSE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["CHECK_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["COLLATION_CHARACTER_SET_APPLICABILITY"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ID", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("IS_COMPILED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("SORTLEN", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, 
createCol("PAD_ATTRIBUTE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'PAD SPACE','NO PAD'")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ID", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_COMPILED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SORTLEN", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAD_ATTRIBUTE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'PAD SPACE','NO PAD'")) infSchema["COLLATIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, 
"")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["COLUMN_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("HISTOGRAM", 2078, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "HISTOGRAM", 2078, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["COLUMN_STATISTICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COLUMN_DEFAULT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("IS_NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("DATA_TYPE", 6163, 
"utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NUMERIC_PRECISION", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NUMERIC_SCALE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLUMN_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COLUMN_KEY", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'','PRI','UNI','MUL'")) - cols = append(cols, createCol("EXTRA", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("PRIVILEGES", 6165, "utf8mb3_general_ci", "", 154, 0, false, "")) - cols = append(cols, createCol("COLUMN_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("GENERATION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, true, 
"")) + cols = append(cols, createCol(parser, "COLUMN_DEFAULT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "IS_NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_KEY", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'','PRI','UNI','MUL'")) + cols = append(cols, createCol(parser, "EXTRA", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "PRIVILEGES", 6165, "utf8mb3_general_ci", "", 154, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "GENERATION_EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["COLUMNS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, 
true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["COLUMNS_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) - cols = append(cols, createCol("ROLE_HOST", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) - cols = append(cols, createCol("IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("IS_MANDATORY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ROLE_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "ROLE_HOST", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "IS_DEFAULT", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IS_MANDATORY", 6165, 
"utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ENABLED_ROLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SUPPORT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("TRANSACTIONS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("XA", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("SAVEPOINTS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SUPPORT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "TRANSACTIONS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "XA", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "SAVEPOINTS", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) infSchema["ENGINES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("EVENT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EVENT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EVENT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) - cols = append(cols, createCol("TIME_ZONE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_BODY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("EVENT_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, 
"")) - cols = append(cols, createCol("EVENT_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) - cols = append(cols, createCol("EXECUTE_AT", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("INTERVAL_VALUE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("INTERVAL_FIELD", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND'")) - cols = append(cols, createCol("SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) - cols = append(cols, createCol("STARTS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ENDS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("STATUS", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'ENABLED','DISABLED','SLAVESIDE_DISABLED'")) - cols = append(cols, createCol("ON_COMPLETION", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("CREATED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_ALTERED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_EXECUTED", 2064, 
"utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("EVENT_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) - cols = append(cols, createCol("ORIGINATOR", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) + cols = append(cols, createCol(parser, "TIME_ZONE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_BODY", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_TYPE", 6165, "utf8mb3_general_ci", "", 9, 0, true, "")) + cols = append(cols, createCol(parser, "EXECUTE_AT", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INTERVAL_VALUE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "INTERVAL_FIELD", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'YEAR','QUARTER','MONTH','DAY','HOUR','MINUTE','WEEK','SECOND','MICROSECOND','YEAR_MONTH','DAY_HOUR','DAY_MINUTE','DAY_SECOND','HOUR_MINUTE','HOUR_SECOND','MINUTE_SECOND','DAY_MICROSECOND','HOUR_MICROSECOND','MINUTE_MICROSECOND','SECOND_MICROSECOND'")) + cols = append(cols, 
createCol(parser, "SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) + cols = append(cols, createCol(parser, "STARTS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ENDS", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'ENABLED','DISABLED','SLAVESIDE_DISABLED'")) + cols = append(cols, createCol(parser, "ON_COMPLETION", 6165, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_ALTERED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_EXECUTED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) + cols = append(cols, createCol(parser, "ORIGINATOR", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["EVENTS"] = cols 
cols = []vindexes.Column{} - cols = append(cols, createCol("FILE_ID", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("FILE_NAME", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("FILE_TYPE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6167, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("LOGFILE_GROUP_NUMBER", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("FULLTEXT_KEYS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DELETED_ROWS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("UPDATE_COUNT", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("FREE_EXTENTS", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TOTAL_EXTENTS", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("EXTENT_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("INITIAL_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CREATION_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols 
= append(cols, createCol("LAST_UPDATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("LAST_ACCESS_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("RECOVER_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TRANSACTION_COUNTER", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("VERSION", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("TABLE_ROWS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AVG_ROW_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATA_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MAX_DATA_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("INDEX_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATA_FREE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CREATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("UPDATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECKSUM", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("STATUS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("EXTRA", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "FILE_ID", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "FILE_NAME", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, 
createCol(parser, "FILE_TYPE", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6167, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NUMBER", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "FULLTEXT_KEYS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DELETED_ROWS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_COUNT", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_EXTENTS", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TOTAL_EXTENTS", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EXTENT_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INITIAL_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAXIMUM_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATION_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "LAST_UPDATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, 
false, "")) + cols = append(cols, createCol(parser, "LAST_ACCESS_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "RECOVER_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TRANSACTION_COUNTER", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "VERSION", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "EXTRA", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) infSchema["FILES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("BLOCK_ID", 
778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGE_NUMBER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("FLUSH_TYPE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("FIX_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("NEWEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("OLDEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ACCESS_TIME", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("NUMBER_RECORDS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DATA_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COMPRESSED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGE_STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("FREE_PAGE_CLOCK", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("IS_STALE", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, 
createCol(parser, "BLOCK_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NUMBER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "FLUSH_TYPE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "FIX_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "NEWEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "OLDEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACCESS_TIME", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "NUMBER_RECORDS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_PAGE_CLOCK", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, 
"IS_STALE", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) infSchema["INNODB_BUFFER_PAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LRU_POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGE_NUMBER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("FLUSH_TYPE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("FIX_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("NEWEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("OLDEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ACCESS_TIME", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("NUMBER_RECORDS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DATA_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COMPRESSED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COMPRESSED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("IS_OLD", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) - cols = append(cols, createCol("FREE_PAGE_CLOCK", 
778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NUMBER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "FLUSH_TYPE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "FIX_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "IS_HASHED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "NEWEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "OLDEST_MODIFICATION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACCESS_TIME", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "NUMBER_RECORDS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DATA_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COMPRESSED", 6165, "utf8mb3_general_ci", "", 3, 0, false, "")) + cols = append(cols, createCol(parser, "IO_FIX", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "IS_OLD", 6165, "utf8mb3_general_ci", 
"", 3, 0, false, "")) + cols = append(cols, createCol(parser, "FREE_PAGE_CLOCK", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_BUFFER_PAGE_LRU"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("POOL_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("FREE_BUFFERS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("OLD_DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MODIFIED_DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PENDING_DECOMPRESS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PENDING_READS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PENDING_FLUSH_LRU", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PENDING_FLUSH_LIST", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_MADE_YOUNG", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_NOT_MADE_YOUNG", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_MADE_YOUNG_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("PAGES_MADE_NOT_YOUNG_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_READ", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_CREATED", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_WRITTEN", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGES_READ_RATE", 1035, "utf8mb3_general_ci", 
"", 12, 0, true, "")) - cols = append(cols, createCol("PAGES_CREATE_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("PAGES_WRITTEN_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_GET", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("HIT_RATE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("YOUNG_MAKE_PER_THOUSAND_GETS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NUMBER_PAGES_READ_AHEAD", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NUMBER_READ_AHEAD_EVICTED", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("READ_AHEAD_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("READ_AHEAD_EVICTED_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) - cols = append(cols, createCol("LRU_IO_TOTAL", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LRU_IO_CURRENT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("UNCOMPRESS_TOTAL", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("UNCOMPRESS_CURRENT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POOL_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "FREE_BUFFERS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "OLD_DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = 
append(cols, createCol(parser, "MODIFIED_DATABASE_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_DECOMPRESS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_READS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_FLUSH_LRU", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PENDING_FLUSH_LIST", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_YOUNG", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_NOT_MADE_YOUNG", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_YOUNG_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_MADE_NOT_YOUNG_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_READ", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_CREATED", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_WRITTEN", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_READ_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_CREATE_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "PAGES_WRITTEN_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_GET", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "HIT_RATE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "YOUNG_MAKE_PER_THOUSAND_GETS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = 
append(cols, createCol(parser, "NOT_YOUNG_MAKE_PER_THOUSAND_GETS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_PAGES_READ_AHEAD", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NUMBER_READ_AHEAD_EVICTED", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "READ_AHEAD_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "READ_AHEAD_EVICTED_RATE", 1035, "utf8mb3_general_ci", "", 12, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_IO_TOTAL", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LRU_IO_CURRENT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "UNCOMPRESS_TOTAL", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "UNCOMPRESS_CURRENT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_BUFFER_POOL_STATS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE_ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("INDEX_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("N_CACHED_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE_ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "N_CACHED_PAGES", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CACHED_INDEXES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", 
"", 0, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMP"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) 
+ cols = append(cols, createCol(parser, "index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMP_PER_INDEX"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "database_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "table_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "index_name", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 
0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMP_PER_INDEX_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_ops_ok", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "compress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_ops", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "uncompress_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMP_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("buffer_pool_instance", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("pages_used", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, 
createCol("pages_free", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("relocation_ops", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("relocation_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "buffer_pool_instance", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "pages_used", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "pages_free", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_ops", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMPMEM"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("buffer_pool_instance", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("pages_used", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("pages_free", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("relocation_ops", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("relocation_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "page_size", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "buffer_pool_instance", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "pages_used", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "pages_free", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, 
"relocation_ops", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "relocation_time", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_CMPMEM_RESET"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("POS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MTYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PRTYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LEN", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("HAS_DEFAULT", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DEFAULT_VALUE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "POS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MTYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PRTYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LEN", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "HAS_DEFAULT", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_VALUE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["INNODB_COLUMNS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("PATH", 6165, "utf8mb3_general_ci", "", 512, 0, 
true, "")) + cols = append(cols, createCol(parser, "SPACE", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "PATH", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) infSchema["INNODB_DATAFILES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("INDEX_ID", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("POS", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_ID", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "POS", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) infSchema["INNODB_FIELDS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) - cols = append(cols, createCol("FOR_NAME", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) - cols = append(cols, createCol("REF_NAME", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) - cols = append(cols, createCol("N_COLS", 265, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("TYPE", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ID", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) + cols = append(cols, createCol(parser, "FOR_NAME", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) + cols = append(cols, createCol(parser, "REF_NAME", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) + cols = append(cols, createCol(parser, "N_COLS", 265, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TYPE", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) infSchema["INNODB_FOREIGN"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 6165, 
"utf8mb3_general_ci", "", 129, 0, false, "")) - cols = append(cols, createCol("FOR_COL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("REF_COL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ID", 6165, "utf8mb3_general_ci", "", 129, 0, false, "")) + cols = append(cols, createCol(parser, "FOR_COL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "REF_COL_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FOREIGN_COLS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FT_BEING_DELETED"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("KEY", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("VALUE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "KEY", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "VALUE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) infSchema["INNODB_FT_CONFIG"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("value", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) + cols = append(cols, createCol(parser, "value", 6165, "utf8mb3_general_ci", "", 18, 0, true, "")) infSchema["INNODB_FT_DEFAULT_STOPWORD"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FT_DELETED"] = cols cols 
= []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) - cols = append(cols, createCol("FIRST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DOC_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) + cols = append(cols, createCol(parser, "FIRST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FT_INDEX_CACHE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) - cols = append(cols, createCol("FIRST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DOC_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 337, 0, true, "")) + cols = append(cols, createCol(parser, "FIRST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 
0, true, "")) + cols = append(cols, createCol(parser, "LAST_DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_COUNT", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DOC_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_FT_INDEX_TABLE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("INDEX_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("TYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("N_FIELDS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PAGE_NO", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MERGE_THRESHOLD", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TYPE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "N_FIELDS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PAGE_NO", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MERGE_THRESHOLD", 263, "utf8mb3_general_ci", "", 
0, 0, true, "")) infSchema["INNODB_INDEXES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("SUBSYSTEM", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("COUNT", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MAX_COUNT", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MIN_COUNT", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AVG_COUNT", 1035, "utf8mb3_general_ci", "", 12, 0, false, "")) - cols = append(cols, createCol("COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MAX_COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MIN_COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AVG_COUNT_RESET", 1035, "utf8mb3_general_ci", "", 12, 0, false, "")) - cols = append(cols, createCol("TIME_ENABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TIME_DISABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TIME_ELAPSED", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TIME_RESET", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("STATUS", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("TYPE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("COMMENT", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "SUBSYSTEM", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "COUNT", 265, "utf8mb3_general_ci", "", 0, 0, true, 
"")) + cols = append(cols, createCol(parser, "MAX_COUNT", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MIN_COUNT", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_COUNT", 1035, "utf8mb3_general_ci", "", 12, 0, false, "")) + cols = append(cols, createCol(parser, "COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MAX_COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MIN_COUNT_RESET", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_COUNT_RESET", 1035, "utf8mb3_general_ci", "", 12, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_ENABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_DISABLED", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_ELAPSED", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TIME_RESET", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "STATUS", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "TYPE", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) infSchema["INNODB_METRICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("PATH", 6165, "utf8mb3_general_ci", "", 4001, 0, true, "")) - cols = append(cols, createCol("SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("STATE", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) - cols = append(cols, 
createCol("PURPOSE", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PATH", 6165, "utf8mb3_general_ci", "", 4001, 0, true, "")) + cols = append(cols, createCol(parser, "SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) + cols = append(cols, createCol(parser, "PURPOSE", 6165, "utf8mb3_general_ci", "", 192, 0, true, "")) infSchema["INNODB_SESSION_TEMP_TABLESPACES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 655, 0, true, "")) - cols = append(cols, createCol("FLAG", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("N_COLS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 12, 0, false, "")) - cols = append(cols, createCol("ZIP_PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("INSTANT_COLS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("TOTAL_ROW_VERSIONS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 655, 0, true, "")) + cols = append(cols, createCol(parser, "FLAG", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, 
createCol(parser, "N_COLS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 265, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 12, 0, false, "")) + cols = append(cols, createCol(parser, "ZIP_PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "INSTANT_COLS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TOTAL_ROW_VERSIONS", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_TABLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 655, 0, true, "")) - cols = append(cols, createCol("FLAG", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 22, 0, false, "")) - cols = append(cols, createCol("PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ZIP_PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("FS_BLOCK_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("FILE_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ALLOCATED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SERVER_VERSION", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) - cols = append(cols, createCol("SPACE_VERSION", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) 
- cols = append(cols, createCol("ENCRYPTION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) - cols = append(cols, createCol("STATE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 655, 0, true, "")) + cols = append(cols, createCol(parser, "FLAG", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 6165, "utf8mb3_general_ci", "", 22, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ZIP_PAGE_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "FS_BLOCK_SIZE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "FILE_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ALLOCATED_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SERVER_VERSION", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) + cols = append(cols, createCol(parser, "SPACE_VERSION", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ENCRYPTION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 10, 0, false, "")) infSchema["INNODB_TABLESPACES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPACE", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) - cols = 
append(cols, createCol("PATH", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("FLAG", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) + cols = append(cols, createCol(parser, "PATH", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "FLAG", 10262, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "SPACE_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, true, "")) infSchema["INNODB_TABLESPACES_BRIEF"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("STATS_INITIALIZED", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) - cols = append(cols, createCol("NUM_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("CLUST_INDEX_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("OTHER_INDEX_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("MODIFIED_COUNTER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("AUTOINC", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("REF_COUNT", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 193, 0, true, "")) + cols = append(cols, createCol(parser, "STATS_INITIALIZED", 6165, "utf8mb3_general_ci", 
"", 193, 0, true, "")) + cols = append(cols, createCol(parser, "NUM_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "CLUST_INDEX_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "OTHER_INDEX_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "MODIFIED_COUNTER", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "AUTOINC", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "REF_COUNT", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_TABLESTATS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("N_COLS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "N_COLS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SPACE", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_TEMP_TABLE_INFO"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("trx_id", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_state", 6165, "utf8mb3_general_ci", "", 13, 0, true, "")) - cols = append(cols, createCol("trx_started", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_requested_lock_id", 6165, "utf8mb3_general_ci", "", 105, 0, false, "")) - cols = append(cols, createCol("trx_wait_started", 2064, 
"utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("trx_weight", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_mysql_thread_id", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_query", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) - cols = append(cols, createCol("trx_operation_state", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("trx_tables_in_use", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_tables_locked", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_lock_structs", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_lock_memory_bytes", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_rows_locked", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_rows_modified", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_concurrency_tickets", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_isolation_level", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) - cols = append(cols, createCol("trx_unique_checks", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_foreign_key_checks", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_last_foreign_key_error", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("trx_adaptive_hash_latched", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_adaptive_hash_timeout", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_is_read_only", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("trx_autocommit_non_locking", 263, "utf8mb3_general_ci", "", 0, 
0, true, "")) - cols = append(cols, createCol("trx_schedule_weight", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "trx_id", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_state", 6165, "utf8mb3_general_ci", "", 13, 0, true, "")) + cols = append(cols, createCol(parser, "trx_started", 2064, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_requested_lock_id", 6165, "utf8mb3_general_ci", "", 105, 0, false, "")) + cols = append(cols, createCol(parser, "trx_wait_started", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "trx_weight", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_mysql_thread_id", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_query", 6165, "utf8mb3_general_ci", "", 1024, 0, false, "")) + cols = append(cols, createCol(parser, "trx_operation_state", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "trx_tables_in_use", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_tables_locked", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_lock_structs", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_lock_memory_bytes", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_rows_locked", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_rows_modified", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_concurrency_tickets", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_isolation_level", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, 
createCol(parser, "trx_unique_checks", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_foreign_key_checks", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_last_foreign_key_error", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "trx_adaptive_hash_latched", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_adaptive_hash_timeout", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_is_read_only", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_autocommit_non_locking", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "trx_schedule_weight", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["INNODB_TRX"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("BASE_POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "BASE_POS", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["INNODB_VIRTUAL"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols 
= append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ORDINAL_POSITION", 776, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("POSITION_IN_UNIQUE_CONSTRAINT", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("REFERENCED_TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("REFERENCED_COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 776, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "POSITION_IN_UNIQUE_CONSTRAINT", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, 
"REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "REFERENCED_COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["KEY_COLUMN_USAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("WORD", 6165, "utf8mb3_general_ci", "", 128, 0, false, "")) - cols = append(cols, createCol("RESERVED", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "WORD", 6165, "utf8mb3_general_ci", "", 128, 0, false, "")) + cols = append(cols, createCol(parser, "RESERVED", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["KEYWORDS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("QUERY", 6165, "utf8mb3_general_ci", "", 65535, 0, true, "")) - cols = append(cols, createCol("TRACE", 6165, "utf8mb3_general_ci", "", 65535, 0, true, "")) - cols = append(cols, createCol("MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("INSUFFICIENT_PRIVILEGES", 257, "utf8mb3_general_ci", "", 1, 0, true, "")) + cols = append(cols, createCol(parser, "QUERY", 6165, "utf8mb3_general_ci", "", 65535, 0, true, "")) + cols = append(cols, createCol(parser, "TRACE", 6165, "utf8mb3_general_ci", "", 65535, 0, true, "")) + cols = append(cols, createCol(parser, "MISSING_BYTES_BEYOND_MAX_MEM_SIZE", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INSUFFICIENT_PRIVILEGES", 257, "utf8mb3_general_ci", "", 1, 0, true, "")) infSchema["OPTIMIZER_TRACE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ORDINAL_POSITION", 778, 
"utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("PARAMETER_MODE", 6165, "utf8mb3_general_ci", "", 5, 0, false, "")) - cols = append(cols, createCol("PARAMETER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NUMERIC_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NUMERIC_SCALE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ROUTINE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'FUNCTION','PROCEDURE'")) + cols = append(cols, createCol(parser, "SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ORDINAL_POSITION", 778, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "PARAMETER_MODE", 6165, "utf8mb3_general_ci", "", 5, 0, false, "")) + cols = append(cols, createCol(parser, "PARAMETER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6163, 
"utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'FUNCTION','PROCEDURE'")) infSchema["PARAMETERS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PARTITION_ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 13, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_METHOD", 
6165, "utf8mb3_general_ci", "", 13, 0, false, "")) - cols = append(cols, createCol("PARTITION_EXPRESSION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) - cols = append(cols, createCol("SUBPARTITION_EXPRESSION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) - cols = append(cols, createCol("PARTITION_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TABLE_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AVG_ROW_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MAX_DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("INDEX_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATA_FREE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CREATE_TIME", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECKSUM", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PARTITION_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("NODEGROUP", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, 
createCol(parser, "PARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_ORDINAL_POSITION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 13, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_METHOD", 6165, "utf8mb3_general_ci", "", 13, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_EXPRESSION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) + cols = append(cols, createCol(parser, "SUBPARTITION_EXPRESSION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_DESCRIPTION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, 
"CHECKSUM", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PARTITION_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "NODEGROUP", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, false, "")) infSchema["PARTITIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("PLUGIN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PLUGIN_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) - cols = append(cols, createCol("PLUGIN_STATUS", 6165, "utf8mb3_general_ci", "", 10, 0, true, "")) - cols = append(cols, createCol("PLUGIN_TYPE", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("PLUGIN_TYPE_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) - cols = append(cols, createCol("PLUGIN_LIBRARY", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PLUGIN_LIBRARY_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("PLUGIN_AUTHOR", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("PLUGIN_DESCRIPTION", 6165, "utf8mb3_general_ci", "", 65535, 0, false, "")) - cols = append(cols, createCol("PLUGIN_LICENSE", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) - cols = append(cols, createCol("LOAD_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_STATUS", 6165, "utf8mb3_general_ci", "", 10, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_TYPE", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = 
append(cols, createCol(parser, "PLUGIN_TYPE_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, true, "")) + cols = append(cols, createCol(parser, "PLUGIN_LIBRARY", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_LIBRARY_VERSION", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_AUTHOR", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_DESCRIPTION", 6165, "utf8mb3_general_ci", "", 65535, 0, false, "")) + cols = append(cols, createCol(parser, "PLUGIN_LICENSE", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) + cols = append(cols, createCol(parser, "LOAD_OPTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["PLUGINS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("USER", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("HOST", 6165, "utf8mb3_general_ci", "", 261, 0, true, "")) - cols = append(cols, createCol("DB", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COMMAND", 6165, "utf8mb3_general_ci", "", 16, 0, true, "")) - cols = append(cols, createCol("TIME", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("INFO", 6165, "utf8mb3_general_ci", "", 65535, 0, false, "")) + cols = append(cols, createCol(parser, "ID", 778, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "USER", 6165, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "HOST", 6165, "utf8mb3_general_ci", "", 261, 0, true, "")) + cols = append(cols, createCol(parser, "DB", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COMMAND", 6165, 
"utf8mb3_general_ci", "", 16, 0, true, "")) + cols = append(cols, createCol(parser, "TIME", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "INFO", 6165, "utf8mb3_general_ci", "", 65535, 0, false, "")) infSchema["PROCESSLIST"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("QUERY_ID", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SEQ", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("STATE", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) - cols = append(cols, createCol("DURATION", 18, "utf8mb3_general_ci", "", 905, 0, true, "")) - cols = append(cols, createCol("CPU_USER", 18, "utf8mb3_general_ci", "", 905, 0, false, "")) - cols = append(cols, createCol("CPU_SYSTEM", 18, "utf8mb3_general_ci", "", 905, 0, false, "")) - cols = append(cols, createCol("CONTEXT_VOLUNTARY", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CONTEXT_INVOLUNTARY", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("BLOCK_OPS_IN", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("BLOCK_OPS_OUT", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MESSAGES_SENT", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MESSAGES_RECEIVED", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PAGE_FAULTS_MAJOR", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PAGE_FAULTS_MINOR", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SWAPS", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SOURCE_FUNCTION", 6165, "utf8mb3_general_ci", "", 30, 0, false, "")) - cols = append(cols, createCol("SOURCE_FILE", 
6165, "utf8mb3_general_ci", "", 20, 0, false, "")) - cols = append(cols, createCol("SOURCE_LINE", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "QUERY_ID", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SEQ", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "STATE", 6165, "utf8mb3_general_ci", "", 30, 0, true, "")) + cols = append(cols, createCol(parser, "DURATION", 18, "utf8mb3_general_ci", "", 905, 0, true, "")) + cols = append(cols, createCol(parser, "CPU_USER", 18, "utf8mb3_general_ci", "", 905, 0, false, "")) + cols = append(cols, createCol(parser, "CPU_SYSTEM", 18, "utf8mb3_general_ci", "", 905, 0, false, "")) + cols = append(cols, createCol(parser, "CONTEXT_VOLUNTARY", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CONTEXT_INVOLUNTARY", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "BLOCK_OPS_IN", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "BLOCK_OPS_OUT", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MESSAGES_SENT", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MESSAGES_RECEIVED", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_FAULTS_MAJOR", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PAGE_FAULTS_MINOR", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SWAPS", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_FUNCTION", 6165, "utf8mb3_general_ci", "", 30, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_FILE", 6165, "utf8mb3_general_ci", "", 20, 0, false, "")) + cols = append(cols, createCol(parser, "SOURCE_LINE", 263, 
"utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["PROFILING"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("UNIQUE_CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("MATCH_OPTION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NONE','PARTIAL','FULL'")) - cols = append(cols, createCol("UPDATE_RULE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO ACTION','RESTRICT','CASCADE','SET NULL','SET DEFAULT'")) - cols = append(cols, createCol("DELETE_RULE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO ACTION','RESTRICT','CASCADE','SET NULL','SET DEFAULT'")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "UNIQUE_CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, 
"UNIQUE_CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "MATCH_OPTION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NONE','PARTIAL','FULL'")) + cols = append(cols, createCol(parser, "UPDATE_RULE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO ACTION','RESTRICT','CASCADE','SET NULL','SET DEFAULT'")) + cols = append(cols, createCol(parser, "DELETE_RULE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO ACTION','RESTRICT','CASCADE','SET NULL','SET DEFAULT'")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "REFERENCED_TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["REFERENTIAL_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("RESOURCE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("RESOURCE_GROUP_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'SYSTEM','USER'")) - cols = append(cols, createCol("RESOURCE_GROUP_ENABLED", 257, "utf8mb3_general_ci", "", 1, 0, true, "")) - cols = append(cols, createCol("VCPU_IDS", 10260, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("THREAD_PRIORITY", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "RESOURCE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "RESOURCE_GROUP_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'SYSTEM','USER'")) + cols = append(cols, createCol(parser, "RESOURCE_GROUP_ENABLED", 257, "utf8mb3_general_ci", "", 1, 0, true, "")) + cols = append(cols, createCol(parser, "VCPU_IDS", 10260, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "THREAD_PRIORITY", 263, "utf8mb3_general_ci", "", 0, 0, true, "")) infSchema["RESOURCE_GROUPS"] = cols cols = []vindexes.Column{} - cols = append(cols, 
createCol("GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Select','Insert','Update','References'")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Select','Insert','Update','References'")) + cols = append(cols, 
createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ROLE_COLUMN_GRANTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) - cols = append(cols, createCol("SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("SPECIFIC_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SPECIFIC_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("ROUTINE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Execute','Alter Routine','Grant'")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) 
+ cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Execute','Alter Routine','Grant'")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ROLE_ROUTINE_GRANTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) - cols = append(cols, createCol("GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("GRANTEE", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger'")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTOR", 6165, "utf8mb3_general_ci", "", 97, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTOR_HOST", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "GRANTEE", 
6167, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE_HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6167, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'Select','Insert','Update','Delete','Create','Drop','Grant','References','Index','Alter','Create View','Show view','Trigger'")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["ROLE_TABLE_GRANTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ROUTINE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ROUTINE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ROUTINE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'FUNCTION','PROCEDURE'")) - cols = append(cols, createCol("DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NUMERIC_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NUMERIC_SCALE", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, 
false, "")) - cols = append(cols, createCol("CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ROUTINE_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("ROUTINE_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("EXTERNAL_NAME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("EXTERNAL_LANGUAGE", 6165, "utf8mb3_general_ci", "SQL", 64, 0, true, "")) - cols = append(cols, createCol("PARAMETER_STYLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("IS_DETERMINISTIC", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("SQL_DATA_ACCESS", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'CONTAINS SQL','NO SQL','READS SQL DATA','MODIFIES SQL DATA'")) - cols = append(cols, createCol("SQL_PATH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SECURITY_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'DEFAULT','INVOKER','DEFINER'")) - cols = append(cols, createCol("CREATED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("LAST_ALTERED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, 
"'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) - cols = append(cols, createCol("ROUTINE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ROUTINE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ROUTINE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'FUNCTION','PROCEDURE'")) + cols = append(cols, createCol(parser, "DATA_TYPE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_MAXIMUM_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_OCTET_LENGTH", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + 
cols = append(cols, createCol(parser, "NUMERIC_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NUMERIC_SCALE", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATETIME_PRECISION", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DTD_IDENTIFIER", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ROUTINE_BODY", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "ROUTINE_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EXTERNAL_NAME", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "EXTERNAL_LANGUAGE", 6165, "utf8mb3_general_ci", "SQL", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PARAMETER_STYLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "IS_DETERMINISTIC", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_DATA_ACCESS", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'CONTAINS SQL','NO SQL','READS SQL DATA','MODIFIES SQL DATA'")) + cols = append(cols, createCol(parser, "SQL_PATH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SECURITY_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'DEFAULT','INVOKER','DEFINER'")) + cols = append(cols, createCol(parser, "CREATED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "LAST_ALTERED", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 2075, 
"utf8mb3_general_ci", "", 0, 0, true, "'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) + cols = append(cols, createCol(parser, "ROUTINE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["ROUTINES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, 
"utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["SCHEMA_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("DEFAULT_CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DEFAULT_COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("SQL_PATH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DEFAULT_ENCRYPTION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO','YES'")) + cols = append(cols, createCol(parser, "CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "DEFAULT_CHARACTER_SET_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "DEFAULT_COLLATION_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_PATH", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DEFAULT_ENCRYPTION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'NO','YES'")) infSchema["SCHEMATA"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("OPTIONS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, 
createCol(parser, "CATALOG_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SCHEMA_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "OPTIONS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) infSchema["SCHEMATA_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SRS_NAME", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) - cols = append(cols, createCol("SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("GEOMETRY_TYPE_NAME", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SRS_NAME", 6165, "utf8mb3_general_ci", "", 80, 0, false, "")) + cols = append(cols, createCol(parser, "SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "GEOMETRY_TYPE_NAME", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["ST_GEOMETRY_COLUMNS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("SRS_NAME", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) - cols = append(cols, createCol("SRS_ID", 776, 
"utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ORGANIZATION", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("ORGANIZATION_COORDSYS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DEFINITION", 6165, "utf8mb3_general_ci", "", 4096, 0, true, "")) - cols = append(cols, createCol("DESCRIPTION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) + cols = append(cols, createCol(parser, "SRS_NAME", 6165, "utf8mb3_general_ci", "", 80, 0, true, "")) + cols = append(cols, createCol(parser, "SRS_ID", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ORGANIZATION", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "ORGANIZATION_COORDSYS_ID", 776, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DEFINITION", 6165, "utf8mb3_general_ci", "", 4096, 0, true, "")) + cols = append(cols, createCol(parser, "DESCRIPTION", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) infSchema["ST_SPATIAL_REFERENCE_SYSTEMS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("UNIT_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) - cols = append(cols, createCol("UNIT_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, false, "")) - cols = append(cols, createCol("CONVERSION_FACTOR", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DESCRIPTION", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "UNIT_NAME", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) + cols = append(cols, createCol(parser, "UNIT_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, false, "")) + cols = append(cols, createCol(parser, "CONVERSION_FACTOR", 1036, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DESCRIPTION", 6165, "utf8mb3_general_ci", "", 255, 0, false, "")) 
infSchema["ST_UNITS_OF_MEASURE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("NON_UNIQUE", 263, "utf8mb3_general_ci", "0", 0, 0, true, "")) - cols = append(cols, createCol("INDEX_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("INDEX_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SEQ_IN_INDEX", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("COLLATION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) - cols = append(cols, createCol("CARDINALITY", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SUB_PART", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("PACKED", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("INDEX_TYPE", 6165, "utf8mb3_general_ci", "", 11, 0, true, "")) - cols = append(cols, createCol("COMMENT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) - cols = append(cols, createCol("INDEX_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) - cols = append(cols, createCol("IS_VISIBLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 
0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "NON_UNIQUE", 263, "utf8mb3_general_ci", "0", 0, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SEQ_IN_INDEX", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "COLUMN_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "COLLATION", 6165, "utf8mb3_general_ci", "", 1, 0, false, "")) + cols = append(cols, createCol(parser, "CARDINALITY", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SUB_PART", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "PACKED", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NULLABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_TYPE", 6165, "utf8mb3_general_ci", "", 11, 0, true, "")) + cols = append(cols, createCol(parser, "COMMENT", 6165, "utf8mb3_general_ci", "", 8, 0, true, "")) + cols = append(cols, createCol(parser, "INDEX_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, true, "")) + cols = append(cols, createCol(parser, "IS_VISIBLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "EXPRESSION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["STATISTICS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, 
"utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CONSTRAINT_TYPE", 6165, "utf8mb3_general_ci", "", 11, 0, true, "")) - cols = append(cols, createCol("ENFORCED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_TYPE", 6165, "utf8mb3_general_ci", "", 11, 0, true, "")) + cols = append(cols, createCol(parser, "ENFORCED", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["TABLE_CONSTRAINTS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = 
append(cols, createCol(parser, "CONSTRAINT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "CONSTRAINT_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["TABLE_CONSTRAINTS_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["TABLE_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, 
"")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'BASE TABLE','VIEW','SYSTEM VIEW'")) - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("VERSION", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ROW_FORMAT", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'Fixed','Dynamic','Compressed','Redundant','Compact','Paged'")) - cols = append(cols, createCol("TABLE_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AVG_ROW_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MAX_DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("INDEX_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("DATA_FREE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AUTO_INCREMENT", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CREATE_TIME", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TABLE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("CHECKSUM", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CREATE_OPTIONS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) - cols = append(cols, createCol("TABLE_COMMENT", 6163, 
"utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_TYPE", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'BASE TABLE','VIEW','SYSTEM VIEW'")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VERSION", 263, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ROW_FORMAT", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'Fixed','Dynamic','Compressed','Redundant','Compact','Paged'")) + cols = append(cols, createCol(parser, "TABLE_ROWS", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AVG_ROW_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAX_DATA_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "INDEX_LENGTH", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "DATA_FREE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AUTO_INCREMENT", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_TIME", 2061, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "UPDATE_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_TIME", 2064, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_COLLATION", 6165, 
"utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "CHECKSUM", 265, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CREATE_OPTIONS", 6165, "utf8mb3_general_ci", "", 256, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_COMMENT", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["TABLES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "SECONDARY_ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["TABLES_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("TABLESPACE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EXTENT_SIZE", 778, 
"utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("AUTOEXTEND_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("MAXIMUM_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("NODEGROUP_ID", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("TABLESPACE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLESPACE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "LOGFILE_GROUP_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EXTENT_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "AUTOEXTEND_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "MAXIMUM_SIZE", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "NODEGROUP_ID", 778, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_COMMENT", 6165, "utf8mb3_general_ci", "", 2048, 0, false, "")) infSchema["TABLESPACES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) - cols = append(cols, createCol("ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "TABLESPACE_NAME", 6165, "utf8mb3_general_ci", "", 268, 0, true, "")) + cols = append(cols, createCol(parser, "ENGINE_ATTRIBUTE", 2078, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["TABLESPACES_EXTENSIONS"] = cols cols = []vindexes.Column{} - cols = append(cols, 
createCol("TRIGGER_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TRIGGER_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TRIGGER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("EVENT_MANIPULATION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'INSERT','UPDATE','DELETE'")) - cols = append(cols, createCol("EVENT_OBJECT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EVENT_OBJECT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("EVENT_OBJECT_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("ACTION_ORDER", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ACTION_CONDITION", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ACTION_STATEMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) - cols = append(cols, createCol("ACTION_ORIENTATION", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("ACTION_TIMING", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'BEFORE','AFTER'")) - cols = append(cols, createCol("ACTION_REFERENCE_OLD_TABLE", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ACTION_REFERENCE_NEW_TABLE", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("ACTION_REFERENCE_OLD_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("ACTION_REFERENCE_NEW_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) - cols = append(cols, createCol("CREATED", 2061, "utf8mb3_general_ci", "", 2, 0, true, "")) - cols = append(cols, createCol("SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, 
"'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TRIGGER_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TRIGGER_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TRIGGER_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "EVENT_MANIPULATION", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'INSERT','UPDATE','DELETE'")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "EVENT_OBJECT_TABLE", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_ORDER", 776, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_CONDITION", 10264, "utf8mb3_general_ci", 
"", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_STATEMENT", 6163, "utf8mb3_general_ci", "", 0, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_ORIENTATION", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_TIMING", 2074, "utf8mb3_general_ci", "", 0, 0, true, "'BEFORE','AFTER'")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_OLD_TABLE", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_NEW_TABLE", 10264, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_OLD_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "ACTION_REFERENCE_NEW_ROW", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "CREATED", 2061, "utf8mb3_general_ci", "", 2, 0, true, "")) + cols = append(cols, createCol(parser, "SQL_MODE", 2075, "utf8mb3_general_ci", "", 0, 0, true, "'REAL_AS_FLOAT','PIPES_AS_CONCAT','ANSI_QUOTES','IGNORE_SPACE','NOT_USED','ONLY_FULL_GROUP_BY','NO_UNSIGNED_SUBTRACTION','NO_DIR_IN_CREATE','NOT_USED_9','NOT_USED_10','NOT_USED_11','NOT_USED_12','NOT_USED_13','NOT_USED_14','NOT_USED_15','NOT_USED_16','NOT_USED_17','NOT_USED_18','ANSI','NO_AUTO_VALUE_ON_ZERO','NO_BACKSLASH_ESCAPES','STRICT_TRANS_TABLES','STRICT_ALL_TABLES','NO_ZERO_IN_DATE','NO_ZERO_DATE','ALLOW_INVALID_DATES','ERROR_FOR_DIVISION_BY_ZERO','TRADITIONAL','NOT_USED_29','HIGH_NOT_PRECEDENCE','NO_ENGINE_SUBSTITUTION','PAD_CHAR_TO_FULL_LENGTH','TIME_TRUNCATE_FRACTIONAL'")) + cols = append(cols, createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, true, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, 
"DATABASE_COLLATION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["TRIGGERS"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("USER", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) - cols = append(cols, createCol("HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) - cols = append(cols, createCol("ATTRIBUTE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "USER", 6167, "utf8mb3_general_ci", "", 32, 0, true, "")) + cols = append(cols, createCol(parser, "HOST", 6167, "utf8mb3_general_ci", "", 255, 0, true, "")) + cols = append(cols, createCol(parser, "ATTRIBUTE", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) infSchema["USER_ATTRIBUTES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) - cols = append(cols, createCol("PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) + cols = append(cols, createCol(parser, "GRANTEE", 6165, "utf8mb3_general_ci", "", 292, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 512, 0, true, "")) + cols = append(cols, createCol(parser, "PRIVILEGE_TYPE", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "IS_GRANTABLE", 6165, "utf8mb3_general_ci", "", 3, 0, true, "")) infSchema["USER_PRIVILEGES"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SPECIFIC_CATALOG", 6165, 
"utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "SPECIFIC_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["VIEW_ROUTINE_USAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("VIEW_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("VIEW_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("VIEW_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VIEW_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VIEW_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VIEW_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, 
createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) infSchema["VIEW_TABLE_USAGE"] = cols cols = []vindexes.Column{} - cols = append(cols, createCol("TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) - cols = append(cols, createCol("VIEW_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) - cols = append(cols, createCol("CHECK_OPTION", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'NONE','LOCAL','CASCADED'")) - cols = append(cols, createCol("IS_UPDATABLE", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'NO','YES'")) - cols = append(cols, createCol("DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, false, "")) - cols = append(cols, createCol("SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, false, "")) - cols = append(cols, createCol("CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) - cols = append(cols, createCol("COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "TABLE_CATALOG", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_SCHEMA", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "TABLE_NAME", 6165, "utf8mb3_general_ci", "", 64, 0, false, "")) + cols = append(cols, createCol(parser, "VIEW_DEFINITION", 6163, "utf8mb3_general_ci", "", 0, 0, false, "")) + cols = append(cols, createCol(parser, "CHECK_OPTION", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'NONE','LOCAL','CASCADED'")) + cols = append(cols, createCol(parser, "IS_UPDATABLE", 2074, "utf8mb3_general_ci", "", 0, 0, false, "'NO','YES'")) + cols = append(cols, 
createCol(parser, "DEFINER", 6165, "utf8mb3_general_ci", "", 288, 0, false, "")) + cols = append(cols, createCol(parser, "SECURITY_TYPE", 6165, "utf8mb3_general_ci", "", 7, 0, false, "")) + cols = append(cols, createCol(parser, "CHARACTER_SET_CLIENT", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) + cols = append(cols, createCol(parser, "COLLATION_CONNECTION", 6165, "utf8mb3_general_ci", "", 64, 0, true, "")) infSchema["VIEWS"] = cols return infSchema } @@ -1637,6 +1645,11 @@ func (i *infoSchemaWithColumns) ConnCollation() collations.ID { return i.inner.ConnCollation() } +// CollationEnv implements the SchemaInformation interface +func (i *infoSchemaWithColumns) CollationEnv() *collations.Environment { + return i.inner.CollationEnv() +} + func (i *infoSchemaWithColumns) ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) { return i.inner.ForeignKeyMode(keyspace) } diff --git a/go/vt/vtgate/semantics/info_schema_gen_test.go b/go/vt/vtgate/semantics/info_schema_gen_test.go index da06b80ac30..c2c5e07bf82 100644 --- a/go/vt/vtgate/semantics/info_schema_gen_test.go +++ b/go/vt/vtgate/semantics/info_schema_gen_test.go @@ -41,7 +41,7 @@ func TestGenerateInfoSchemaMap(t *testing.T) { require.NoError(t, err) defer db.Close() - collationName := collations.Local().LookupName(collations.SystemCollation.Collation) + collationName := collations.MySQL8().LookupName(collations.SystemCollation.Collation) for _, tbl := range informationSchemaTables80 { result, err := db.Query(fmt.Sprintf("show columns from information_schema.`%s`", tbl)) diff --git a/go/vt/vtgate/semantics/real_table.go b/go/vt/vtgate/semantics/real_table.go index da55d95895f..72549b98e8c 100644 --- a/go/vt/vtgate/semantics/real_table.go +++ b/go/vt/vtgate/semantics/real_table.go @@ -19,6 +19,7 @@ package semantics import ( "strings" + "vitess.io/vitess/go/mysql/collations" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" @@ 
-31,6 +32,7 @@ type RealTable struct { ASTNode *sqlparser.AliasedTableExpr Table *vindexes.Table isInfSchema bool + collationEnv *collations.Environment } var _ TableInfo = (*RealTable)(nil) @@ -67,11 +69,11 @@ func (r *RealTable) IsInfSchema() bool { // GetColumns implements the TableInfo interface func (r *RealTable) getColumns() []ColumnInfo { - return vindexTableToColumnInfo(r.Table) + return vindexTableToColumnInfo(r.Table, r.collationEnv) } // GetExpr implements the TableInfo interface -func (r *RealTable) getAliasedTableExpr() *sqlparser.AliasedTableExpr { +func (r *RealTable) GetAliasedTableExpr() *sqlparser.AliasedTableExpr { return r.ASTNode } @@ -115,7 +117,7 @@ func (r *RealTable) matches(name sqlparser.TableName) bool { return (name.Qualifier.IsEmpty() || name.Qualifier.String() == r.dbName) && r.tableName == name.Name.String() } -func vindexTableToColumnInfo(tbl *vindexes.Table) []ColumnInfo { +func vindexTableToColumnInfo(tbl *vindexes.Table, collationEnv *collations.Environment) []ColumnInfo { if tbl == nil { return nil } @@ -125,7 +127,7 @@ func vindexTableToColumnInfo(tbl *vindexes.Table) []ColumnInfo { cols = append(cols, ColumnInfo{ Name: col.Name.String(), - Type: col.ToEvalengineType(), + Type: col.ToEvalengineType(collationEnv), Invisible: col.Invisible, }) nameMap[col.Name.String()] = nil diff --git a/go/vt/vtgate/semantics/scoper.go b/go/vt/vtgate/semantics/scoper.go index c782da03678..878ac222911 100644 --- a/go/vt/vtgate/semantics/scoper.go +++ b/go/vt/vtgate/semantics/scoper.go @@ -197,7 +197,7 @@ func (s *scoper) up(cursor *sqlparser.Cursor) error { if isParentSelectStatement(cursor) { s.popScope() } - case *sqlparser.Select, sqlparser.GroupBy, *sqlparser.Update, *sqlparser.Delete, *sqlparser.Insert, *sqlparser.Union: + case *sqlparser.Select, sqlparser.GroupBy, *sqlparser.Update, *sqlparser.Insert, *sqlparser.Union: id := EmptyTableSet() for _, tableInfo := range s.currentScope().tables { set := tableInfo.getTableSet(s.org) diff --git 
a/go/vt/vtgate/semantics/semantic_state.go b/go/vt/vtgate/semantics/semantic_state.go index 6668070a575..7674a627b4e 100644 --- a/go/vt/vtgate/semantics/semantic_state.go +++ b/go/vt/vtgate/semantics/semantic_state.go @@ -50,7 +50,7 @@ type ( authoritative() bool // getAliasedTableExpr returns the AST struct behind this table - getAliasedTableExpr() *sqlparser.AliasedTableExpr + GetAliasedTableExpr() *sqlparser.AliasedTableExpr // canShortCut will return nil when the keyspace needs to be checked, // and a true/false if the decision has been made already @@ -117,6 +117,8 @@ type ( // It doesn't recurse inside derived tables to find the original dependencies. Direct ExprDependencies + Targets map[sqlparser.IdentifierCS]TableSet + // ColumnEqualities is used for transitive closures (e.g., if a == b and b == c, then a == c). ColumnEqualities map[columnName][]sqlparser.Expr @@ -150,6 +152,7 @@ type ( SchemaInformation interface { FindTableOrVindex(tablename sqlparser.TableName) (*vindexes.Table, vindexes.Vindex, string, topodatapb.TabletType, key.Destination, error) ConnCollation() collations.ID + CollationEnv() *collations.Environment // ForeignKeyMode returns the foreign_key flag value ForeignKeyMode(keyspace string) (vschemapb.Keyspace_ForeignKeyMode, error) GetForeignKeyChecksState() *bool @@ -516,7 +519,7 @@ func EmptySemTable() *SemTable { // TableSetFor returns the bitmask for this particular table func (st *SemTable) TableSetFor(t *sqlparser.AliasedTableExpr) TableSet { for idx, t2 := range st.Tables { - if t == t2.getAliasedTableExpr() { + if t == t2.GetAliasedTableExpr() { return SingleTableSet(idx) } } diff --git a/go/vt/vtgate/semantics/semantic_state_test.go b/go/vt/vtgate/semantics/semantic_state_test.go index b904f3656de..4ae0a5562b5 100644 --- a/go/vt/vtgate/semantics/semantic_state_test.go +++ b/go/vt/vtgate/semantics/semantic_state_test.go @@ -46,7 +46,7 @@ func TestBindingAndExprEquality(t *testing.T) { for _, test := range tests { 
t.Run(test.expressions, func(t *testing.T) { - parse, err := sqlparser.Parse(fmt.Sprintf("select %s from t1, t2", test.expressions)) + parse, err := sqlparser.NewTestParser().Parse(fmt.Sprintf("select %s from t1, t2", test.expressions)) require.NoError(t, err) st, err := Analyze(parse, "db", fakeSchemaInfoTest()) require.NoError(t, err) @@ -853,7 +853,7 @@ func TestIsFkDependentColumnUpdated(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - stmt, err := sqlparser.Parse(tt.query) + stmt, err := sqlparser.NewTestParser().Parse(tt.query) require.NoError(t, err) semTable, err := Analyze(stmt, keyspaceName, tt.fakeSi) require.NoError(t, err) @@ -970,7 +970,7 @@ func TestHasNonLiteralForeignKeyUpdate(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - stmt, err := sqlparser.Parse(tt.query) + stmt, err := sqlparser.NewTestParser().Parse(tt.query) require.NoError(t, err) semTable, err := Analyze(stmt, keyspaceName, tt.fakeSi) require.NoError(t, err) diff --git a/go/vt/vtgate/semantics/table_collector.go b/go/vt/vtgate/semantics/table_collector.go index 12fb691874f..bcf0402433a 100644 --- a/go/vt/vtgate/semantics/table_collector.go +++ b/go/vt/vtgate/semantics/table_collector.go @@ -207,7 +207,7 @@ func newVindexTable(t sqlparser.IdentifierCS) *vindexes.Table { // The code lives in this file since it is only touching tableCollector data func (tc *tableCollector) tableSetFor(t *sqlparser.AliasedTableExpr) TableSet { for i, t2 := range tc.Tables { - if t == t2.getAliasedTableExpr() { + if t == t2.GetAliasedTableExpr() { return SingleTableSet(i) } } @@ -231,10 +231,11 @@ func (tc *tableCollector) createTable( vindex vindexes.Vindex, ) TableInfo { table := &RealTable{ - tableName: alias.As.String(), - ASTNode: alias, - Table: tbl, - isInfSchema: isInfSchema, + tableName: alias.As.String(), + ASTNode: alias, + Table: tbl, + isInfSchema: isInfSchema, + collationEnv: tc.si.CollationEnv(), } if alias.As.IsEmpty() { 
diff --git a/go/vt/vtgate/semantics/typer.go b/go/vt/vtgate/semantics/typer.go index 8b44105d255..a9c783cee18 100644 --- a/go/vt/vtgate/semantics/typer.go +++ b/go/vt/vtgate/semantics/typer.go @@ -27,12 +27,14 @@ import ( // typer is responsible for setting the type for expressions // it does it's work after visiting the children (up), since the children types is often needed to type a node. type typer struct { - m map[sqlparser.Expr]evalengine.Type + m map[sqlparser.Expr]evalengine.Type + collationEnv *collations.Environment } -func newTyper() *typer { +func newTyper(collationEnv *collations.Environment) *typer { return &typer{ - m: map[sqlparser.Expr]evalengine.Type{}, + m: map[sqlparser.Expr]evalengine.Type{}, + collationEnv: collationEnv, } } @@ -43,10 +45,10 @@ func (t *typer) exprType(expr sqlparser.Expr) evalengine.Type { func (t *typer) up(cursor *sqlparser.Cursor) error { switch node := cursor.Node().(type) { case *sqlparser.Literal: - t.m[node] = evalengine.NewType(node.SQLType(), collations.DefaultCollationForType(node.SQLType())) + t.m[node] = evalengine.NewType(node.SQLType(), collations.CollationForType(node.SQLType(), t.collationEnv.DefaultConnectionCharset())) case *sqlparser.Argument: if node.Type >= 0 { - t.m[node] = evalengine.NewType(node.Type, collations.DefaultCollationForType(node.Type)) + t.m[node] = evalengine.NewType(node.Type, collations.CollationForType(node.Type, t.collationEnv.DefaultConnectionCharset())) } case sqlparser.AggrFunc: code, ok := opcode.SupportedAggregates[node.AggrName()] @@ -62,7 +64,7 @@ func (t *typer) up(cursor *sqlparser.Cursor) error { type_ := code.Type(inputType) _, isCount := node.(*sqlparser.Count) _, isCountStart := node.(*sqlparser.CountStar) - t.m[node] = evalengine.NewTypeEx(type_, collations.DefaultCollationForType(type_), !(isCount || isCountStart), 0, 0) + t.m[node] = evalengine.NewTypeEx(type_, collations.CollationForType(type_, t.collationEnv.DefaultConnectionCharset()), !(isCount || isCountStart), 0, 
0) } return nil } diff --git a/go/vt/vtgate/semantics/typer_test.go b/go/vt/vtgate/semantics/typer_test.go index c5417edbf64..c87d5672dab 100644 --- a/go/vt/vtgate/semantics/typer_test.go +++ b/go/vt/vtgate/semantics/typer_test.go @@ -40,7 +40,7 @@ func TestNormalizerAndSemanticAnalysisIntegration(t *testing.T) { for _, test := range tests { t.Run(test.query, func(t *testing.T) { - parse, err := sqlparser.Parse(test.query) + parse, err := sqlparser.NewTestParser().Parse(test.query) require.NoError(t, err) err = sqlparser.Normalize(parse, sqlparser.NewReservedVars("bv", sqlparser.BindVars{}), map[string]*querypb.BindVariable{}) diff --git a/go/vt/vtgate/semantics/vindex_table.go b/go/vt/vtgate/semantics/vindex_table.go index f78e68cbd5b..fba8f8ab9a0 100644 --- a/go/vt/vtgate/semantics/vindex_table.go +++ b/go/vt/vtgate/semantics/vindex_table.go @@ -67,8 +67,8 @@ func (v *VindexTable) Name() (sqlparser.TableName, error) { } // GetExpr implements the TableInfo interface -func (v *VindexTable) getAliasedTableExpr() *sqlparser.AliasedTableExpr { - return v.Table.getAliasedTableExpr() +func (v *VindexTable) GetAliasedTableExpr() *sqlparser.AliasedTableExpr { + return v.Table.GetAliasedTableExpr() } func (v *VindexTable) canShortCut() shortCut { diff --git a/go/vt/vtgate/semantics/vtable.go b/go/vt/vtgate/semantics/vtable.go index 271da126cd4..133e38ff505 100644 --- a/go/vt/vtgate/semantics/vtable.go +++ b/go/vt/vtgate/semantics/vtable.go @@ -70,7 +70,7 @@ func (v *vTableInfo) Name() (sqlparser.TableName, error) { return sqlparser.TableName{}, vterrors.Errorf(vtrpcpb.Code_INTERNAL, "oh noes") } -func (v *vTableInfo) getAliasedTableExpr() *sqlparser.AliasedTableExpr { +func (v *vTableInfo) GetAliasedTableExpr() *sqlparser.AliasedTableExpr { return nil } diff --git a/go/vt/vtgate/simplifier/simplifier_test.go b/go/vt/vtgate/simplifier/simplifier_test.go index e2270a551b5..c8b052d7a9c 100644 --- a/go/vt/vtgate/simplifier/simplifier_test.go +++ 
b/go/vt/vtgate/simplifier/simplifier_test.go @@ -49,7 +49,7 @@ order by unsharded.orderByExpr2 asc limit 123 offset 456 ` - ast, err := sqlparser.Parse(query) + ast, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) visitAllExpressionsInAST(ast.(sqlparser.SelectStatement), func(cursor expressionCursor) bool { fmt.Printf(">> found expression: %s\n", sqlparser.String(cursor.expr)) @@ -67,7 +67,7 @@ limit 123 offset 456 func TestAbortExpressionCursor(t *testing.T) { query := "select user.id, count(*), unsharded.name from user join unsharded on 13 = 14 where unsharded.id = 42 and name = 'foo' and user.id = unsharded.id" - ast, err := sqlparser.Parse(query) + ast, err := sqlparser.NewTestParser().Parse(query) require.NoError(t, err) visitAllExpressionsInAST(ast.(sqlparser.SelectStatement), func(cursor expressionCursor) bool { fmt.Println(sqlparser.String(cursor.expr)) @@ -120,15 +120,19 @@ func TestSimplifyEvalEngineExpr(t *testing.T) { p0 := plus(p11, p12) expr := SimplifyExpr(p0, func(expr sqlparser.Expr) bool { - local, err := evalengine.Translate(expr, nil) + collationEnv := collations.MySQL8() + local, err := evalengine.Translate(expr, &evalengine.Config{ + CollationEnv: collationEnv, + Collation: collationEnv.DefaultConnectionCharset(), + }) if err != nil { return false } - res, err := evalengine.EmptyExpressionEnv().Evaluate(local) + res, err := evalengine.EmptyExpressionEnv(collationEnv).Evaluate(local) if err != nil { return false } - toInt64, err := res.Value(collations.Default()).ToInt64() + toInt64, err := res.Value(collationEnv.DefaultConnectionCharset()).ToInt64() if err != nil { return false } diff --git a/go/vt/vtgate/tabletgateway.go b/go/vt/vtgate/tabletgateway.go index 7703f47c4fa..d1846168a43 100644 --- a/go/vt/vtgate/tabletgateway.go +++ b/go/vt/vtgate/tabletgateway.go @@ -71,7 +71,7 @@ type TabletGateway struct { srvTopoServer srvtopo.Server localCell string retryCount int - defaultConnCollation uint32 + defaultConnCollation 
atomic.Uint32 // mu protects the fields of this group. mu sync.Mutex @@ -431,17 +431,17 @@ func (gw *TabletGateway) TabletsHealthyStatus() discovery.TabletsCacheStatusList } func (gw *TabletGateway) updateDefaultConnCollation(tablet *topodatapb.Tablet) { - if atomic.CompareAndSwapUint32(&gw.defaultConnCollation, 0, tablet.DefaultConnCollation) { + if gw.defaultConnCollation.CompareAndSwap(0, tablet.DefaultConnCollation) { return } - if atomic.LoadUint32(&gw.defaultConnCollation) != tablet.DefaultConnCollation { + if gw.defaultConnCollation.Load() != tablet.DefaultConnCollation { log.Warning("this Vitess cluster has tablets with different default connection collations") } } // DefaultConnCollation returns the default connection collation of this TabletGateway func (gw *TabletGateway) DefaultConnCollation() collations.ID { - return collations.ID(atomic.LoadUint32(&gw.defaultConnCollation)) + return collations.ID(gw.defaultConnCollation.Load()) } // NewShardError returns a new error with the shard info amended. 
diff --git a/go/vt/vtgate/tabletgateway_flaky_test.go b/go/vt/vtgate/tabletgateway_flaky_test.go index 917d931d2ff..19894b0002e 100644 --- a/go/vt/vtgate/tabletgateway_flaky_test.go +++ b/go/vt/vtgate/tabletgateway_flaky_test.go @@ -69,7 +69,7 @@ func TestGatewayBufferingWhenPrimarySwitchesServingState(t *testing.T) { Fields: []*querypb.Field{{ Name: "col1", Type: sqltypes.VarChar, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }}, RowsAffected: 1, Rows: [][]sqltypes.Value{{ @@ -158,7 +158,7 @@ func TestGatewayBufferingWhileReparenting(t *testing.T) { Fields: []*querypb.Field{{ Name: "col1", Type: sqltypes.VarChar, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }}, RowsAffected: 1, Rows: [][]sqltypes.Value{{ @@ -287,7 +287,7 @@ func TestInconsistentStateDetectedBuffering(t *testing.T) { Fields: []*querypb.Field{{ Name: "col1", Type: sqltypes.VarChar, - Charset: uint32(collations.Default()), + Charset: uint32(collations.MySQL8().DefaultConnectionCharset()), }}, RowsAffected: 1, Rows: [][]sqltypes.Value{{ diff --git a/go/vt/vtgate/vcursor_impl.go b/go/vt/vtgate/vcursor_impl.go index 616b1ce8846..17ed098c256 100644 --- a/go/vt/vtgate/vcursor_impl.go +++ b/go/vt/vtgate/vcursor_impl.go @@ -83,6 +83,9 @@ type iExecute interface { ParseDestinationTarget(targetString string) (string, topodatapb.TabletType, key.Destination, error) VSchema() *vindexes.VSchema planPrepareStmt(ctx context.Context, vcursor *vcursorImpl, query string) (*engine.Plan, sqlparser.Statement, error) + + collationEnv() *collations.Environment + sqlparser() *sqlparser.Parser } // VSchemaOperator is an interface to Vschema Operations @@ -162,7 +165,7 @@ func newVCursorImpl( } } if connCollation == collations.Unknown { - connCollation = collations.Default() + connCollation = executor.collEnv.DefaultConnectionCharset() } warmingReadsPct := 0 @@ -206,6 +209,15 @@ func (vc *vcursorImpl) 
ConnCollation() collations.ID { return vc.collation } +// CollationEnv returns the collation environment of this executor +func (vc *vcursorImpl) CollationEnv() *collations.Environment { + return vc.executor.collationEnv() +} + +func (vc *vcursorImpl) SQLParser() *sqlparser.Parser { + return vc.executor.sqlparser() +} + func (vc *vcursorImpl) TimeZone() *time.Location { return vc.safeSession.TimeZone() } @@ -1082,7 +1094,7 @@ func (vc *vcursorImpl) keyForPlan(ctx context.Context, query string, buf io.Stri _, _ = buf.WriteString(vc.keyspace) _, _ = buf.WriteString(vindexes.TabletTypeSuffix[vc.tabletType]) _, _ = buf.WriteString("+Collate:") - _, _ = buf.WriteString(collations.Local().LookupName(vc.collation)) + _, _ = buf.WriteString(vc.CollationEnv().LookupName(vc.collation)) if vc.destination != nil { switch vc.destination.(type) { @@ -1240,7 +1252,7 @@ func (vc *vcursorImpl) ThrottleApp(ctx context.Context, throttledAppRule *topoda } func (vc *vcursorImpl) CanUseSetVar() bool { - return sqlparser.IsMySQL80AndAbove() && setVarEnabled + return vc.SQLParser().IsMySQL80AndAbove() && setVarEnabled } func (vc *vcursorImpl) ReleaseLock(ctx context.Context) error { @@ -1269,7 +1281,7 @@ func (vc *vcursorImpl) cloneWithAutocommitSession() *vcursorImpl { } func (vc *vcursorImpl) VExplainLogging() { - vc.safeSession.EnableLogging() + vc.safeSession.EnableLogging(vc.SQLParser()) } func (vc *vcursorImpl) GetVExplainLogs() []engine.ExecuteEntry { diff --git a/go/vt/vtgate/vcursor_impl_test.go b/go/vt/vtgate/vcursor_impl_test.go index 77be183eacd..b8e4a0d3a0a 100644 --- a/go/vt/vtgate/vcursor_impl_test.go +++ b/go/vt/vtgate/vcursor_impl_test.go @@ -184,9 +184,10 @@ func TestDestinationKeyspace(t *testing.T) { expectedError: errNoKeyspace.Error(), }} + r, _, _, _, _ := createExecutorEnv(t) for i, tc := range tests { t.Run(strconv.Itoa(i)+tc.targetString, func(t *testing.T) { - impl, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: tc.targetString}), 
sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4) + impl, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{TargetString: tc.targetString}), sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4) impl.vschema = tc.vschema dest, keyspace, tabletType, err := impl.TargetDestination(tc.qualifier) if tc.expectedError == "" { @@ -242,9 +243,10 @@ func TestSetTarget(t *testing.T) { expectedError: "can't execute the given command because you have an active transaction", }} + r, _, _, _, _ := createExecutorEnv(t) for i, tc := range tests { t.Run(fmt.Sprintf("%d#%s", i, tc.targetString), func(t *testing.T) { - vc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{InTransaction: true}), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4) + vc, _ := newVCursorImpl(NewSafeSession(&vtgatepb.Session{InTransaction: true}), sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, nil, nil, false, querypb.ExecuteOptions_Gen4) vc.vschema = tc.vschema err := vc.SetTarget(tc.targetString) if tc.expectedError == "" { @@ -290,11 +292,12 @@ func TestKeyForPlan(t *testing.T) { expectedPlanPrefixKey: "ks1@replica+Collate:utf8mb4_0900_ai_ci+Query:SELECT 1", }} + r, _, _, _, _ := createExecutorEnv(t) for i, tc := range tests { t.Run(fmt.Sprintf("%d#%s", i, tc.targetString), func(t *testing.T) { ss := NewSafeSession(&vtgatepb.Session{InTransaction: false}) ss.SetTargetString(tc.targetString) - vc, err := newVCursorImpl(ss, sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: tc.vschema}, tc.vschema, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4) + vc, err := newVCursorImpl(ss, sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: 
tc.vschema}, tc.vschema, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4) require.NoError(t, err) vc.vschema = tc.vschema @@ -316,7 +319,8 @@ func TestFirstSortedKeyspace(t *testing.T) { ks3Schema.Keyspace.Name: ks3Schema, }} - vc, err := newVCursorImpl(NewSafeSession(nil), sqlparser.MarginComments{}, nil, nil, &fakeVSchemaOperator{vschema: vschemaWith2KS}, vschemaWith2KS, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4) + r, _, _, _, _ := createExecutorEnv(t) + vc, err := newVCursorImpl(NewSafeSession(nil), sqlparser.MarginComments{}, r, nil, &fakeVSchemaOperator{vschema: vschemaWith2KS}, vschemaWith2KS, srvtopo.NewResolver(&fakeTopoServer{}, nil, ""), nil, false, querypb.ExecuteOptions_Gen4) require.NoError(t, err) ks, err := vc.FirstSortedKeyspace() require.NoError(t, err) diff --git a/go/vt/vtgate/vindexes/consistent_lookup.go b/go/vt/vtgate/vindexes/consistent_lookup.go index 9173ded96e6..cc74966c197 100644 --- a/go/vt/vtgate/vindexes/consistent_lookup.go +++ b/go/vt/vtgate/vindexes/consistent_lookup.go @@ -413,7 +413,7 @@ func (lu *clCommon) Delete(ctx context.Context, vcursor VCursor, rowsColValues [ func (lu *clCommon) Update(ctx context.Context, vcursor VCursor, oldValues []sqltypes.Value, ksid []byte, newValues []sqltypes.Value) error { equal := true for i := range oldValues { - result, err := evalengine.NullsafeCompare(oldValues[i], newValues[i], vcursor.ConnCollation()) + result, err := evalengine.NullsafeCompare(oldValues[i], newValues[i], vcursor.CollationEnv(), vcursor.ConnCollation()) // errors from NullsafeCompare can be ignored. 
if they are real problems, we'll see them in the Create/Update if err != nil || result != 0 { equal = false diff --git a/go/vt/vtgate/vindexes/consistent_lookup_test.go b/go/vt/vtgate/vindexes/consistent_lookup_test.go index deecc23ebdd..832a16fae9f 100644 --- a/go/vt/vtgate/vindexes/consistent_lookup_test.go +++ b/go/vt/vtgate/vindexes/consistent_lookup_test.go @@ -528,7 +528,11 @@ func (vc *loggingVCursor) InTransactionAndIsDML() bool { } func (vc *loggingVCursor) ConnCollation() collations.ID { - return collations.Default() + return vc.CollationEnv().DefaultConnectionCharset() +} + +func (vc *loggingVCursor) CollationEnv() *collations.Environment { + return collations.MySQL8() } type bv struct { diff --git a/go/vt/vtgate/vindexes/lookup_test.go b/go/vt/vtgate/vindexes/lookup_test.go index b82ab3d4fec..736017f89eb 100644 --- a/go/vt/vtgate/vindexes/lookup_test.go +++ b/go/vt/vtgate/vindexes/lookup_test.go @@ -115,7 +115,11 @@ func (vc *vcursor) execute(query string, bindvars map[string]*querypb.BindVariab } func (vc *vcursor) ConnCollation() collations.ID { - return collations.Default() + return vc.CollationEnv().DefaultConnectionCharset() +} + +func (vc *vcursor) CollationEnv() *collations.Environment { + return collations.MySQL8() } func lookupCreateVindexTestCase( diff --git a/go/vt/vtgate/vindexes/vindex.go b/go/vt/vtgate/vindexes/vindex.go index a5295681248..e9e346c7b89 100644 --- a/go/vt/vtgate/vindexes/vindex.go +++ b/go/vt/vtgate/vindexes/vindex.go @@ -44,6 +44,7 @@ type ( InTransactionAndIsDML() bool LookupRowLockShardSession() vtgatepb.CommitOrder ConnCollation() collations.ID + CollationEnv() *collations.Environment } // Vindex defines the interface required to register a vindex. 
diff --git a/go/vt/vtgate/vindexes/vschema.go b/go/vt/vtgate/vindexes/vschema.go index e724794b6cb..c20f5561566 100644 --- a/go/vt/vtgate/vindexes/vschema.go +++ b/go/vt/vtgate/vindexes/vschema.go @@ -55,6 +55,7 @@ var TabletTypeSuffix = map[topodatapb.TabletType]string{ // The following constants represent table types. const ( + TypeTable = "" TypeSequence = "sequence" TypeReference = "reference" ) @@ -218,10 +219,10 @@ func (col *Column) MarshalJSON() ([]byte, error) { return json.Marshal(cj) } -func (col *Column) ToEvalengineType() evalengine.Type { - collation := collations.DefaultCollationForType(col.Type) +func (col *Column) ToEvalengineType(collationEnv *collations.Environment) evalengine.Type { + collation := collations.CollationForType(col.Type, collationEnv.DefaultConnectionCharset()) if sqltypes.IsText(col.Type) { - coll, found := collations.Local().LookupID(col.CollationName) + coll, found := collationEnv.LookupID(col.CollationName) if found { collation = coll } @@ -309,7 +310,7 @@ func (source *Source) String() string { } // BuildVSchema builds a VSchema from a SrvVSchema. -func BuildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { +func BuildVSchema(source *vschemapb.SrvVSchema, parser *sqlparser.Parser) (vschema *VSchema) { vschema = &VSchema{ RoutingRules: make(map[string]*RoutingRule), globalTables: make(map[string]*Table), @@ -317,22 +318,22 @@ func BuildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { Keyspaces: make(map[string]*KeyspaceSchema), created: time.Now(), } - buildKeyspaces(source, vschema) + buildKeyspaces(source, vschema, parser) // buildGlobalTables before buildReferences so that buildReferences can // resolve sources which reference global tables. 
buildGlobalTables(source, vschema) buildReferences(source, vschema) - buildRoutingRule(source, vschema) + buildRoutingRule(source, vschema, parser) buildShardRoutingRule(source, vschema) // Resolve auto-increments after routing rules are built since sequence tables also obey routing rules. - resolveAutoIncrement(source, vschema) + resolveAutoIncrement(source, vschema, parser) return vschema } // BuildKeyspaceSchema builds the vschema portion for one keyspace. // The build ignores sequence references because those dependencies can // go cross-keyspace. -func BuildKeyspaceSchema(input *vschemapb.Keyspace, keyspace string) (*KeyspaceSchema, error) { +func BuildKeyspaceSchema(input *vschemapb.Keyspace, keyspace string, parser *sqlparser.Parser) (*KeyspaceSchema, error) { if input == nil { input = &vschemapb.Keyspace{} } @@ -346,18 +347,18 @@ func BuildKeyspaceSchema(input *vschemapb.Keyspace, keyspace string) (*KeyspaceS uniqueVindexes: make(map[string]Vindex), Keyspaces: make(map[string]*KeyspaceSchema), } - buildKeyspaces(formal, vschema) + buildKeyspaces(formal, vschema, parser) err := vschema.Keyspaces[keyspace].Error return vschema.Keyspaces[keyspace], err } // BuildKeyspace ensures that the keyspace vschema is valid. // External references (like sequence) are not validated. 
-func BuildKeyspace(input *vschemapb.Keyspace) (*KeyspaceSchema, error) { - return BuildKeyspaceSchema(input, "") +func BuildKeyspace(input *vschemapb.Keyspace, parser *sqlparser.Parser) (*KeyspaceSchema, error) { + return BuildKeyspaceSchema(input, "", parser) } -func buildKeyspaces(source *vschemapb.SrvVSchema, vschema *VSchema) { +func buildKeyspaces(source *vschemapb.SrvVSchema, vschema *VSchema, parser *sqlparser.Parser) { for ksname, ks := range source.Keyspaces { ksvschema := &KeyspaceSchema{ Keyspace: &Keyspace{ @@ -369,7 +370,7 @@ func buildKeyspaces(source *vschemapb.SrvVSchema, vschema *VSchema) { Vindexes: make(map[string]Vindex), } vschema.Keyspaces[ksname] = ksvschema - ksvschema.Error = buildTables(ks, vschema, ksvschema) + ksvschema.Error = buildTables(ks, vschema, ksvschema, parser) } } @@ -381,12 +382,12 @@ func replaceUnspecifiedForeignKeyMode(fkMode vschemapb.Keyspace_ForeignKeyMode) return fkMode } -func (vschema *VSchema) AddView(ksname string, viewName, query string) error { +func (vschema *VSchema) AddView(ksname, viewName, query string, parser *sqlparser.Parser) error { ks, ok := vschema.Keyspaces[ksname] if !ok { return fmt.Errorf("keyspace %s not found in vschema", ksname) } - ast, err := sqlparser.Parse(query) + ast, err := parser.Parse(query) if err != nil { return err } @@ -555,7 +556,7 @@ func buildKeyspaceReferences(vschema *VSchema, ksvschema *KeyspaceSchema) error return nil } -func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSchema) error { +func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSchema, parser *sqlparser.Parser) error { keyspace := ksvschema.Keyspace for vname, vindexInfo := range ks.Vindexes { vindex, err := CreateVindex(vindexInfo.Type, vname, vindexInfo.Params) @@ -650,7 +651,7 @@ func buildTables(ks *vschemapb.Keyspace, vschema *VSchema, ksvschema *KeyspaceSc var colDefault sqlparser.Expr if col.Default != "" { var err error - colDefault, err = 
sqlparser.ParseExpr(col.Default) + colDefault, err = parser.ParseExpr(col.Default) if err != nil { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "could not parse the '%s' column's default expression '%s' for table '%s'", col.Name, col.Default, tname) @@ -809,7 +810,7 @@ func (vschema *VSchema) addTableName(t *Table) { } } -func resolveAutoIncrement(source *vschemapb.SrvVSchema, vschema *VSchema) { +func resolveAutoIncrement(source *vschemapb.SrvVSchema, vschema *VSchema, parser *sqlparser.Parser) { for ksname, ks := range source.Keyspaces { ksvschema := vschema.Keyspaces[ksname] for tname, table := range ks.Tables { @@ -817,7 +818,7 @@ func resolveAutoIncrement(source *vschemapb.SrvVSchema, vschema *VSchema) { if t == nil || table.AutoIncrement == nil { continue } - seqks, seqtab, err := sqlparser.ParseTable(table.AutoIncrement.Sequence) + seqks, seqtab, err := parser.ParseTable(table.AutoIncrement.Sequence) var seq *Table if err == nil { // Ensure that sequence tables also obey routing rules. 
@@ -893,7 +894,7 @@ func parseTable(tableName string) (sqlparser.TableName, error) { }, nil } -func buildRoutingRule(source *vschemapb.SrvVSchema, vschema *VSchema) { +func buildRoutingRule(source *vschemapb.SrvVSchema, vschema *VSchema, parser *sqlparser.Parser) { var err error if source.RoutingRules == nil { return @@ -936,7 +937,7 @@ outer: continue outer } - toKeyspace, toTableName, err := sqlparser.ParseTable(toTable) + toKeyspace, toTableName, err := parser.ParseTable(toTable) if err != nil { vschema.RoutingRules[rule.FromTable] = &RoutingRule{ diff --git a/go/vt/vtgate/vindexes/vschema_test.go b/go/vt/vtgate/vindexes/vschema_test.go index ebcb39fef29..ad892a66ccb 100644 --- a/go/vt/vtgate/vindexes/vschema_test.go +++ b/go/vt/vtgate/vindexes/vschema_test.go @@ -235,7 +235,7 @@ func init() { } func buildVSchema(source *vschemapb.SrvVSchema) (vschema *VSchema) { - vs := BuildVSchema(source) + vs := BuildVSchema(source, sqlparser.NewTestParser()) if vs != nil { vs.ResetCreated() } @@ -247,7 +247,7 @@ func TestUnshardedVSchemaValid(t *testing.T) { Sharded: false, Vindexes: make(map[string]*vschemapb.Vindex), Tables: make(map[string]*vschemapb.Table), - }) + }, sqlparser.NewTestParser()) require.NoError(t, err) } @@ -282,7 +282,7 @@ func TestForeignKeyMode(t *testing.T) { ForeignKeyMode: test.fkMode, Vindexes: make(map[string]*vschemapb.Vindex), Tables: make(map[string]*vschemapb.Table), - }) + }, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, test.wantedFkMode, ksSchema.ForeignKeyMode) }) @@ -297,7 +297,7 @@ func TestUnshardedVSchema(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t1": {}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) require.NoError(t, got.Keyspaces["unsharded"].Error) table, err := got.FindTable("unsharded", "t1") @@ -322,7 +322,7 @@ func TestVSchemaColumns(t *testing.T) { {Name: "c4", Type: sqltypes.TypeJSON, Default: "json_array()"}, }}}}}} - got := BuildVSchema(&good) + got 
:= BuildVSchema(&good, sqlparser.NewTestParser()) require.NoError(t, got.Keyspaces["unsharded"].Error) t1, err := got.FindTable("unsharded", "t1") @@ -352,11 +352,11 @@ func TestVSchemaViews(t *testing.T) { }, { Name: "c2", Type: sqltypes.VarChar}}}}}}} - vschema := BuildVSchema(&good) + vschema := BuildVSchema(&good, sqlparser.NewTestParser()) require.NoError(t, vschema.Keyspaces["unsharded"].Error) // add view to unsharded keyspace. - vschema.AddView("unsharded", "v1", "SELECT c1+c2 AS added FROM t1") + vschema.AddView("unsharded", "v1", "SELECT c1+c2 AS added FROM t1", sqlparser.NewTestParser()) view := vschema.FindView("unsharded", "v1") assert.Equal(t, "select c1 + c2 as added from t1", sqlparser.String(view)) @@ -411,7 +411,7 @@ func TestVSchemaForeignKeys(t *testing.T) { }, { Name: "c2", Type: sqltypes.VarChar}}}}}}} - vschema := BuildVSchema(&good) + vschema := BuildVSchema(&good, sqlparser.NewTestParser()) require.NoError(t, vschema.Keyspaces["main"].Error) // add fk constraints to a keyspace. 
@@ -474,7 +474,7 @@ func TestVSchemaColumnListAuthoritative(t *testing.T) { Type: sqltypes.VarChar}}, ColumnListAuthoritative: true}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) t1, err := got.FindTable("unsharded", "t1") require.NoError(t, err) @@ -493,7 +493,7 @@ func TestVSchemaColumnsFail(t *testing.T) { Name: "c1"}, { Name: "c1"}}}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) require.EqualError(t, got.Keyspaces["unsharded"].Error, "duplicate column name 'c1' for table: t1") } @@ -506,7 +506,7 @@ func TestVSchemaPinned(t *testing.T) { "t1": { Pinned: "80"}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error require.NoError(t, err) @@ -538,7 +538,7 @@ func TestShardedVSchemaOwned(t *testing.T) { Column: "c2", Name: "stln1"}}}}}}} - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error require.NoError(t, err) @@ -608,7 +608,7 @@ func TestShardedVSchemaOwnerInfo(t *testing.T) { }, }, } - got := BuildVSchema(&good) + got := BuildVSchema(&good, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error require.NoError(t, err) results := []struct { @@ -710,7 +710,7 @@ func TestVSchemaRoutingRules(t *testing.T) { }, }, } - got := BuildVSchema(&input) + got := BuildVSchema(&input, sqlparser.NewTestParser()) ks1 := &Keyspace{ Name: "ks1", Sharded: true, @@ -958,7 +958,7 @@ func TestFindBestColVindex(t *testing.T) { Tables: map[string]*vschemapb.Table{ "t2": {}}}}} - vs := BuildVSchema(testSrvVSchema) + vs := BuildVSchema(testSrvVSchema, sqlparser.NewTestParser()) testcases := []struct { tablename string @@ -1274,7 +1274,7 @@ func TestBuildVSchemaVindexNotFoundFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := `vindexType "noexist" 
not found` if err == nil || err.Error() != want { @@ -1298,7 +1298,7 @@ func TestBuildVSchemaNoColumnVindexFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "missing primary col vindex for table: t1" if err == nil || err.Error() != want { @@ -1583,7 +1583,7 @@ func TestBuildVSchemaNoindexFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "vindex notexist not found for table t1" if err == nil || err.Error() != want { @@ -1615,7 +1615,7 @@ func TestBuildVSchemaColumnAndColumnsFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := `can't use column and columns at the same time in vindex (stfu) and table (t1)` if err == nil || err.Error() != want { @@ -1645,7 +1645,7 @@ func TestBuildVSchemaNoColumnsFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := `must specify at least one column for vindex (stfu) and table (t1)` if err == nil || err.Error() != want { @@ -1676,7 +1676,7 @@ func TestBuildVSchemaNotUniqueFail(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "primary vindex stln is not Unique for table t1" if err == nil || err.Error() != want { @@ -1708,7 +1708,7 @@ func TestBuildVSchemaPrimaryCannotBeOwned(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "primary vindex stlu cannot be owned for table t1" if err == nil || err.Error() != want { @@ -1736,7 +1736,7 @@ func TestBuildVSchemaReferenceTableSourceMayBeUnqualified(t *testing.T) { }, }, } - 
vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.NoError(t, vschema.Keyspaces["unsharded"].Error) require.NoError(t, vschema.Keyspaces["sharded"].Error) } @@ -1768,7 +1768,7 @@ func TestBuildVSchemaReferenceTableSourceMustBeInDifferentKeyspace(t *testing.T) }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded"].Error) require.EqualError(t, vschema.Keyspaces["sharded"].Error, "source \"sharded.src\" may not reference a table in the same keyspace as table: ref") @@ -1788,7 +1788,7 @@ func TestBuildVSchemaReferenceTableSourceKeyspaceMustExist(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded"].Error) require.EqualError(t, vschema.Keyspaces["sharded"].Error, "source \"unsharded.src\" references a non-existent keyspace \"unsharded\"") @@ -1814,7 +1814,7 @@ func TestBuildVSchemaReferenceTableSourceTableMustExist(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded"].Error) require.EqualError(t, vschema.Keyspaces["sharded"].Error, "source \"unsharded.src\" references a table \"src\" that is not present in the VSchema of keyspace \"unsharded\"") @@ -1852,7 +1852,7 @@ func TestBuildVSchemaReferenceTableSourceMayUseShardedKeyspace(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.NoError(t, vschema.Keyspaces["sharded1"].Error) require.NoError(t, vschema.Keyspaces["sharded2"].Error) } @@ -1919,7 +1919,7 @@ func TestBuildVSchemaReferenceTableSourceTableMustBeBasicOrReferenceWithoutSourc }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded1"].Error) 
require.EqualError(t, vschema.Keyspaces["sharded1"].Error, "source \"unsharded1.src1\" may not reference a table of type \"sequence\": ref1") @@ -1953,7 +1953,7 @@ func TestBuildVSchemaSourceMayBeReferencedAtMostOncePerKeyspace(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["sharded"].Error) require.EqualError(t, vschema.Keyspaces["sharded"].Error, "source \"unsharded.src\" may not be referenced more than once per keyspace: ref1, ref2") @@ -1991,7 +1991,7 @@ func TestBuildVSchemaMayNotChainReferences(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) require.Error(t, vschema.Keyspaces["unsharded1"].Error) require.EqualError(t, vschema.Keyspaces["unsharded1"].Error, "reference chaining is not allowed ref => unsharded2.ref => unsharded3.ref: ref") @@ -2193,7 +2193,7 @@ func TestBadSequence(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "cannot resolve sequence invalid_seq: table invalid_seq not found" if err == nil || err.Error() != want { @@ -2241,7 +2241,7 @@ func TestBadSequenceName(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "invalid table name: a.b.seq" if err == nil || !strings.Contains(err.Error(), want) { @@ -2265,7 +2265,7 @@ func TestBadShardedSequence(t *testing.T) { }, }, } - got := BuildVSchema(&bad) + got := BuildVSchema(&bad, sqlparser.NewTestParser()) err := got.Keyspaces["sharded"].Error want := "sequence table has to be in an unsharded keyspace or must be pinned: t1" if err == nil || err.Error() != want { @@ -2316,7 +2316,7 @@ func TestFindTable(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) _, 
err := vschema.FindTable("", "t1") require.EqualError(t, err, "ambiguous table reference: t1") @@ -2440,7 +2440,7 @@ func TestFindTableOrVindex(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) ta := vschema.Keyspaces["ksa"].Tables["ta"] t1 := vschema.Keyspaces["ksb"].Tables["t1"] @@ -2543,7 +2543,7 @@ func TestBuildKeyspaceSchema(t *testing.T) { "t2": {}, }, } - got, _ := BuildKeyspaceSchema(good, "ks") + got, _ := BuildKeyspaceSchema(good, "ks", sqlparser.NewTestParser()) err := got.Error require.NoError(t, err) ks := &Keyspace{ @@ -2585,7 +2585,7 @@ func TestValidate(t *testing.T) { "t2": {}, }, } - _, err := BuildKeyspace(good) + _, err := BuildKeyspace(good, sqlparser.NewTestParser()) require.NoError(t, err) bad := &vschemapb.Keyspace{ Sharded: true, @@ -2598,7 +2598,7 @@ func TestValidate(t *testing.T) { "t2": {}, }, } - _, err = BuildKeyspace(bad) + _, err = BuildKeyspace(bad, sqlparser.NewTestParser()) want := `vindexType "absent" not found` if err == nil || !strings.HasPrefix(err.Error(), want) { t.Errorf("Validate: %v, must start with %s", err, want) @@ -2794,7 +2794,7 @@ func TestFindSingleKeyspace(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) none := &Table{ Name: sqlparser.NewIdentifierCS("none"), Keyspace: &Keyspace{ @@ -2835,7 +2835,7 @@ func TestFindSingleKeyspace(t *testing.T) { }, }, } - vschema = BuildVSchema(&input) + vschema = BuildVSchema(&input, sqlparser.NewTestParser()) _, err := vschema.FindTable("", "none") wantErr := "table none not found" if err == nil || err.Error() != wantErr { @@ -2869,7 +2869,7 @@ func TestMultiColVindexPartialAllowed(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) table, err := vschema.FindTable("ksa", "user_region") require.NoError(t, err) require.Len(t, table.ColumnVindexes, 2) @@ -2902,7 +2902,7 @@ func 
TestMultiColVindexPartialNotAllowed(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) table, err := vschema.FindTable("ksa", "multiColTbl") require.NoError(t, err) require.Len(t, table.ColumnVindexes, 1) @@ -2939,7 +2939,7 @@ func TestSourceTableHasReferencedBy(t *testing.T) { }, }, } - vs := BuildVSchema(&input) + vs := BuildVSchema(&input, sqlparser.NewTestParser()) ref1, err := vs.FindTable("sharded1", "ref") require.NoError(t, err) ref2, err := vs.FindTable("sharded2", "ref") @@ -2973,7 +2973,7 @@ func TestReferenceTableAndSourceAreGloballyRoutable(t *testing.T) { }, } - vs := BuildVSchema(&input) + vs := BuildVSchema(&input, sqlparser.NewTestParser()) t1, err := vs.FindTable("unsharded", "t1") require.NoError(t, err) // If the source of a reference table does not require explicit routing, @@ -2983,7 +2983,7 @@ func TestReferenceTableAndSourceAreGloballyRoutable(t *testing.T) { require.Equal(t, t1, globalT1) input.Keyspaces["unsharded"].RequireExplicitRouting = true - vs = BuildVSchema(&input) + vs = BuildVSchema(&input, sqlparser.NewTestParser()) _, err = vs.FindTable("sharded", "t1") require.NoError(t, err) // If the source of a reference table requires explicit routing, then @@ -3019,7 +3019,7 @@ func TestOtherTablesMakeReferenceTableAndSourceAmbiguous(t *testing.T) { }, }, } - vs := BuildVSchema(&input) + vs := BuildVSchema(&input, sqlparser.NewTestParser()) _, err := vs.FindTable("", "t1") require.Error(t, err) } @@ -3120,7 +3120,7 @@ func TestFindTableWithSequences(t *testing.T) { }, }, } - vschema := BuildVSchema(&input) + vschema := BuildVSchema(&input, sqlparser.NewTestParser()) notFoundError := func(table string) string { return fmt.Sprintf("table %s not found", table) diff --git a/go/vt/vtgate/vschema_manager.go b/go/vt/vtgate/vschema_manager.go index 20c11634b54..e202186894a 100644 --- a/go/vt/vtgate/vschema_manager.go +++ b/go/vt/vtgate/vschema_manager.go @@ -44,6 +44,7 @@ type 
VSchemaManager struct { cell string subscriber func(vschema *vindexes.VSchema, stats *VSchemaStats) schema SchemaInfo + parser *sqlparser.Parser } // SchemaInfo is an interface to schema tracker. @@ -71,7 +72,7 @@ func (vm *VSchemaManager) UpdateVSchema(ctx context.Context, ksName string, vsch ks := vschema.Keyspaces[ksName] - _, err = vindexes.BuildKeyspace(ks) + _, err = vindexes.BuildKeyspace(ks, vm.parser) if err != nil { return err } @@ -132,7 +133,7 @@ func (vm *VSchemaManager) VSchemaUpdate(v *vschemapb.SrvVSchema, err error) bool if v == nil { // We encountered an error, build an empty vschema. if vm.currentVschema == nil { - vschema = vindexes.BuildVSchema(&vschemapb.SrvVSchema{}) + vschema = vindexes.BuildVSchema(&vschemapb.SrvVSchema{}, vm.parser) } } else { vschema = vm.buildAndEnhanceVSchema(v) @@ -187,7 +188,7 @@ func (vm *VSchemaManager) Rebuild() { // buildAndEnhanceVSchema builds a new VSchema and uses information from the schema tracker to update it func (vm *VSchemaManager) buildAndEnhanceVSchema(v *vschemapb.SrvVSchema) *vindexes.VSchema { - vschema := vindexes.BuildVSchema(v) + vschema := vindexes.BuildVSchema(v, vm.parser) if vm.schema != nil { vm.updateFromSchema(vschema) // We mark the keyspaces that have foreign key management in Vitess and have cyclic foreign keys diff --git a/go/vt/vtgate/vschema_manager_test.go b/go/vt/vtgate/vschema_manager_test.go index 4d414c9d58a..53cbc323720 100644 --- a/go/vt/vtgate/vschema_manager_test.go +++ b/go/vt/vtgate/vschema_manager_test.go @@ -575,7 +575,7 @@ func TestMarkErrorIfCyclesInFk(t *testing.T) { // createFkDefinition is a helper function to create a Foreign key definition struct from the columns used in it provided as list of strings. 
func createFkDefinition(childCols []string, parentTableName string, parentCols []string, onUpdate, onDelete sqlparser.ReferenceAction) *sqlparser.ForeignKeyDefinition { - pKs, pTbl, _ := sqlparser.ParseTable(parentTableName) + pKs, pTbl, _ := sqlparser.NewTestParser().ParseTable(parentTableName) return &sqlparser.ForeignKeyDefinition{ Source: sqlparser.MakeColumns(childCols...), ReferenceDefinition: &sqlparser.ReferenceDefinition{ diff --git a/go/vt/vtgate/vtgate.go b/go/vt/vtgate/vtgate.go index 64260e628f0..4716cdcf794 100644 --- a/go/vt/vtgate/vtgate.go +++ b/go/vt/vtgate/vtgate.go @@ -30,6 +30,8 @@ import ( "github.com/spf13/pflag" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/acl" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" @@ -246,6 +248,7 @@ func Init( cell string, tabletTypesToWait []topodatapb.TabletType, pv plancontext.PlannerVersion, + collationEnv *collations.Environment, ) *VTGate { // Build objects from low to high level. // Start with the gateway. 
If we can't reach the topology service, @@ -296,10 +299,19 @@ func Init( log.Fatal("Failed to create a new sidecar database identifier cache during init as one already existed!") } + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: servenv.MySQLServerVersion(), + TruncateUILen: servenv.TruncateUILen, + TruncateErrLen: servenv.TruncateErrLen, + }) + if err != nil { + log.Fatalf("unable to initialize sql parser: %v", err) + } + var si SchemaInfo // default nil var st *vtschema.Tracker if enableSchemaChangeSignal { - st = vtschema.NewTracker(gw.hc.Subscribe(), enableViews) + st = vtschema.NewTracker(gw.hc.Subscribe(), enableViews, parser) addKeyspacesToTracker(ctx, srvResolver, st, gw) si = st } @@ -319,6 +331,8 @@ func Init( noScatter, pv, warmingReadsPercent, + collationEnv, + parser, ) if err := executor.defaultQueryLogger(); err != nil { @@ -460,7 +474,7 @@ func (vtg *VTGate) Execute(ctx context.Context, mysqlCtx vtgateservice.MySQLConn "BindVariables": bindVariables, "Session": session, } - err = recordAndAnnotateError(err, statsKey, query, vtg.logExecute) + err = recordAndAnnotateError(err, statsKey, query, vtg.logExecute, vtg.executor.vm.parser) return session, nil, err } @@ -526,7 +540,7 @@ func (vtg *VTGate) StreamExecute(ctx context.Context, mysqlCtx vtgateservice.MyS "BindVariables": bindVariables, "Session": session, } - return safeSession.Session, recordAndAnnotateError(err, statsKey, query, vtg.logStreamExecute) + return safeSession.Session, recordAndAnnotateError(err, statsKey, query, vtg.logStreamExecute, vtg.executor.vm.parser) } return safeSession.Session, nil } @@ -566,7 +580,7 @@ handleError: "BindVariables": bindVariables, "Session": session, } - err = recordAndAnnotateError(err, statsKey, query, vtg.logPrepare) + err = recordAndAnnotateError(err, statsKey, query, vtg.logPrepare, vtg.executor.vm.parser) return session, nil, err } @@ -585,7 +599,7 @@ func (vtg *VTGate) VSchemaStats() *VSchemaStats { return 
vtg.executor.VSchemaStats() } -func truncateErrorStrings(data map[string]any) map[string]any { +func truncateErrorStrings(data map[string]any, parser *sqlparser.Parser) map[string]any { ret := map[string]any{} if terseErrors { // request might have PII information. Return an empty map @@ -594,16 +608,16 @@ func truncateErrorStrings(data map[string]any) map[string]any { for key, val := range data { mapVal, ok := val.(map[string]any) if ok { - ret[key] = truncateErrorStrings(mapVal) + ret[key] = truncateErrorStrings(mapVal, parser) } else { strVal := fmt.Sprintf("%v", val) - ret[key] = sqlparser.TruncateForLog(strVal) + ret[key] = parser.TruncateForLog(strVal) } } return ret } -func recordAndAnnotateError(err error, statsKey []string, request map[string]any, logger *logutil.ThrottledLogger) error { +func recordAndAnnotateError(err error, statsKey []string, request map[string]any, logger *logutil.ThrottledLogger, parser *sqlparser.Parser) error { ec := vterrors.Code(err) fullKey := []string{ statsKey[0], @@ -619,7 +633,7 @@ func recordAndAnnotateError(err error, statsKey []string, request map[string]any } // Traverse the request structure and truncate any long values - request = truncateErrorStrings(request) + request = truncateErrorStrings(request, parser) errorCounts.Add(fullKey, 1) @@ -634,7 +648,7 @@ func recordAndAnnotateError(err error, statsKey []string, request map[string]any if !exists { return err } - piiSafeSQL, err2 := sqlparser.RedactSQLQuery(sql.(string)) + piiSafeSQL, err2 := parser.RedactSQLQuery(sql.(string)) if err2 != nil { return err } diff --git a/go/vt/vttablet/endtoend/framework/client.go b/go/vt/vttablet/endtoend/framework/client.go index e671cb447c7..eb70eaeb9cb 100644 --- a/go/vt/vttablet/endtoend/framework/client.go +++ b/go/vt/vttablet/endtoend/framework/client.go @@ -57,6 +57,19 @@ func NewClient() *QueryClient { } } +// NewClientWithServer creates a new client for a given server. 
+func NewClientWithServer(server *tabletserver.TabletServer) *QueryClient { + return &QueryClient{ + ctx: callerid.NewContext( + context.Background(), + &vtrpcpb.CallerID{}, + &querypb.VTGateCallerID{Username: "dev"}, + ), + target: Target, + server: server, + } +} + // NewClientWithTabletType creates a new client for Server with the provided tablet type. func NewClientWithTabletType(tabletType topodatapb.TabletType) *QueryClient { targetCopy := Target.CloneVT() diff --git a/go/vt/vttablet/endtoend/framework/server.go b/go/vt/vttablet/endtoend/framework/server.go index 7fca349deff..e966b934cb8 100644 --- a/go/vt/vttablet/endtoend/framework/server.go +++ b/go/vt/vttablet/endtoend/framework/server.go @@ -23,8 +23,10 @@ import ( "net/http" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/yaml2" @@ -77,7 +79,7 @@ func StartCustomServer(ctx context.Context, connParams, connAppDebugParams mysql } TopoServer = memorytopo.NewServer(ctx, "") - Server = tabletserver.NewTabletServer(ctx, "", config, TopoServer, &topodatapb.TabletAlias{}) + Server = tabletserver.NewTabletServer(ctx, "", config, TopoServer, &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) Server.Register() err := Server.StartService(Target, dbcfgs, nil /* mysqld */) if err != nil { diff --git a/go/vt/vttablet/endtoend/misc_test.go b/go/vt/vttablet/endtoend/misc_test.go index 5c37a5d9bb0..a4e7dea89e3 100644 --- a/go/vt/vttablet/endtoend/misc_test.go +++ b/go/vt/vttablet/endtoend/misc_test.go @@ -28,20 +28,17 @@ import ( "testing" "time" - "google.golang.org/protobuf/proto" - - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" 
"vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/endtoend/framework" ) @@ -628,66 +625,6 @@ func (tl *testLogger) getLog(i int) string { return fmt.Sprintf("ERROR: log %d/%d does not exist", i, len(tl.logs)) } -func TestLogTruncation(t *testing.T) { - client := framework.NewClient() - tl := newTestLogger() - defer tl.Close() - - // Test that a long error string is not truncated by default - _, err := client.Execute( - "insert into vitess_test values(123, null, :data, null)", - map[string]*querypb.BindVariable{"data": sqltypes.StringBindVariable("THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED")}, - ) - wantLog := `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess_test values(123, null, :data, null)", BindVars: {data: "type:VARCHAR value:\"THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED\""}` - wantErr := wantLog - if err == nil { - t.Errorf("query unexpectedly succeeded") - } - if tl.getLog(0) != wantLog { - t.Errorf("log was unexpectedly truncated: got\n'%s', want\n'%s'", tl.getLog(0), wantLog) - } - - if err.Error() != wantErr { - t.Errorf("error was unexpectedly truncated: got\n'%s', want\n'%s'", err.Error(), wantErr) - } - - // Test that the data too long error is truncated once the option is set - sqlparser.SetTruncateErrLen(30) - _, err = client.Execute( - "insert into vitess_test values(123, null, :data, null)", - map[string]*querypb.BindVariable{"data": sqltypes.StringBindVariable("THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED")}, - ) - wantLog = `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess [TRUNCATED]", BindVars: {data: " [TRUNCATED]` - wantErr = `Data too long for column 'charval' at row 1 
(errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess_test values(123, null, :data, null)", BindVars: {data: "type:VARCHAR value:\"THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED\""}` - if err == nil { - t.Errorf("query unexpectedly succeeded") - } - if tl.getLog(1) != wantLog { - t.Errorf("log was not truncated properly: got\n'%s', want\n'%s'", tl.getLog(1), wantLog) - } - if err.Error() != wantErr { - t.Errorf("error was unexpectedly truncated: got\n'%s', want\n'%s'", err.Error(), wantErr) - } - - // Test that trailing comments are preserved data too long error is truncated once the option is set - sqlparser.SetTruncateErrLen(30) - _, err = client.Execute( - "insert into vitess_test values(123, null, :data, null) /* KEEP ME */", - map[string]*querypb.BindVariable{"data": sqltypes.StringBindVariable("THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED")}, - ) - wantLog = `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess [TRUNCATED] /* KEEP ME */", BindVars: {data: " [TRUNCATED]` - wantErr = `Data too long for column 'charval' at row 1 (errno 1406) (sqlstate 22001) (CallerID: dev): Sql: "insert into vitess_test values(123, null, :data, null) /* KEEP ME */", BindVars: {data: "type:VARCHAR value:\"THIS IS A LONG LONG LONG LONG QUERY STRING THAT SHOULD BE SHORTENED\""}` - if err == nil { - t.Errorf("query unexpectedly succeeded") - } - if tl.getLog(2) != wantLog { - t.Errorf("log was not truncated properly: got\n'%s', want\n'%s'", tl.getLog(2), wantLog) - } - if err.Error() != wantErr { - t.Errorf("error was unexpectedly truncated: got\n'%s', want\n'%s'", err.Error(), wantErr) - } -} - func TestClientFoundRows(t *testing.T) { client := framework.NewClient() if _, err := client.Execute("insert into vitess_test(intval, charval) values(124, 'aa')", nil); err != nil { diff --git a/go/vt/vttablet/onlineddl/analysis.go 
b/go/vt/vttablet/onlineddl/analysis.go index 68eee5d4b9b..dbd8a5dab45 100644 --- a/go/vt/vttablet/onlineddl/analysis.go +++ b/go/vt/vttablet/onlineddl/analysis.go @@ -75,7 +75,7 @@ func (e *Executor) getCreateTableStatement(ctx context.Context, tableName string if err != nil { return nil, vterrors.Wrapf(err, "in Executor.getCreateTableStatement()") } - stmt, err := sqlparser.ParseStrictDDL(showCreateTable) + stmt, err := e.env.SQLParser().ParseStrictDDL(showCreateTable) if err != nil { return nil, err } @@ -349,7 +349,7 @@ func AnalyzeInstantDDL(alterTable *sqlparser.AlterTable, createTable *sqlparser. // analyzeSpecialAlterPlan checks if the given ALTER onlineDDL, and for the current state of affected table, // can be executed in a special way. If so, it returns with a "special plan" func (e *Executor) analyzeSpecialAlterPlan(ctx context.Context, onlineDDL *schema.OnlineDDL, capableOf mysql.CapableOf) (*SpecialAlterPlan, error) { - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return nil, err } diff --git a/go/vt/vttablet/onlineddl/analysis_test.go b/go/vt/vttablet/onlineddl/analysis_test.go index afaa3e8aa1f..d1510cf1773 100644 --- a/go/vt/vttablet/onlineddl/analysis_test.go +++ b/go/vt/vttablet/onlineddl/analysis_test.go @@ -208,15 +208,16 @@ func TestAnalyzeInstantDDL(t *testing.T) { instant: false, }, } + parser := sqlparser.NewTestParser() for _, tc := range tt { name := tc.version + " " + tc.create t.Run(name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(tc.create) + stmt, err := parser.ParseStrictDDL(tc.create) require.NoError(t, err) createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) - stmt, err = sqlparser.ParseStrictDDL(tc.alter) + stmt, err = parser.ParseStrictDDL(tc.alter) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) diff --git 
a/go/vt/vttablet/onlineddl/executor.go b/go/vt/vttablet/onlineddl/executor.go index bb8431b5199..4d0ada6bfe2 100644 --- a/go/vt/vttablet/onlineddl/executor.go +++ b/go/vt/vttablet/onlineddl/executor.go @@ -304,7 +304,7 @@ func (e *Executor) executeQueryWithSidecarDBReplacement(ctx context.Context, que defer conn.Recycle() // Replace any provided sidecar DB qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) + uq, err := e.env.SQLParser().ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -413,7 +413,7 @@ func (e *Executor) allowConcurrentMigration(onlineDDL *schema.OnlineDDL) (action } var err error - action, err = onlineDDL.GetAction() + action, err = onlineDDL.GetAction(e.env.SQLParser()) if err != nil { return action, false } @@ -800,7 +800,7 @@ func (e *Executor) killTableLockHoldersAndAccessors(ctx context.Context, tableNa for _, row := range rs.Named().Rows { threadId := row.AsInt64("id", 0) infoQuery := row.AsString("info", "") - stmt, err := sqlparser.Parse(infoQuery) + stmt, err := e.env.SQLParser().Parse(infoQuery) if err != nil { log.Error(vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unable to parse processlist Info query: %v", infoQuery)) continue @@ -1383,7 +1383,7 @@ func (e *Executor) duplicateCreateTable(ctx context.Context, onlineDDL *schema.O constraintMap map[string]string, err error, ) { - stmt, err := sqlparser.ParseStrictDDL(originalShowCreateTable) + stmt, err := e.env.SQLParser().ParseStrictDDL(originalShowCreateTable) if err != nil { return nil, nil, nil, err } @@ -1449,7 +1449,7 @@ func (e *Executor) initVreplicationOriginalMigration(ctx context.Context, online return nil, err } - stmt, err := sqlparser.ParseStrictDDL(onlineDDL.SQL) + stmt, err := e.env.SQLParser().ParseStrictDDL(onlineDDL.SQL) if err != nil { return nil, err } @@ -1476,7 +1476,7 @@ func (e *Executor) initVreplicationOriginalMigration(ctx 
context.Context, online return v, err } - v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, originalShowCreateTable, vreplShowCreateTable, onlineDDL.SQL, onlineDDL.StrategySetting().IsAnalyzeTableFlag()) + v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, originalShowCreateTable, vreplShowCreateTable, onlineDDL.SQL, onlineDDL.StrategySetting().IsAnalyzeTableFlag(), e.env.CollationEnv(), e.env.SQLParser()) return v, nil } @@ -1530,7 +1530,7 @@ func (e *Executor) initVreplicationRevertMigration(ctx context.Context, onlineDD if err := e.updateArtifacts(ctx, onlineDDL.UUID, vreplTableName); err != nil { return v, err } - v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, "", "", "", false) + v = NewVRepl(onlineDDL.UUID, e.keyspace, e.shard, e.dbName, onlineDDL.Table, vreplTableName, "", "", "", false, e.env.CollationEnv(), e.env.SQLParser()) v.pos = revertStream.pos return v, nil } @@ -2400,7 +2400,7 @@ func (e *Executor) reviewEmptyTableRevertMigrations(ctx context.Context, onlineD // Try to update table name and ddl_action // Failure to do so fails the migration - revertUUID, err := onlineDDL.GetRevertUUID() + revertUUID, err := onlineDDL.GetRevertUUID(e.env.SQLParser()) if err != nil { return false, e.failMigration(ctx, onlineDDL, fmt.Errorf("cannot analyze revert UUID for revert migration %s: %v", onlineDDL.UUID, err)) } @@ -2554,7 +2554,7 @@ func (e *Executor) reviewQueuedMigrations(ctx context.Context) error { func (e *Executor) validateMigrationRevertible(ctx context.Context, revertMigration *schema.OnlineDDL, revertingMigrationUUID string) (err error) { // Validation: migration to revert exists and is in complete state - action, actionStr, err := revertMigration.GetActionStr() + action, actionStr, err := revertMigration.GetActionStr(e.env.SQLParser()) if err != nil { return err } @@ -2623,7 +2623,7 @@ func (e *Executor) 
validateMigrationRevertible(ctx context.Context, revertMigrat // - what type of migration we're reverting? (CREATE/DROP/ALTER) // - revert appropriately to the type of migration func (e *Executor) executeRevert(ctx context.Context, onlineDDL *schema.OnlineDDL) (err error) { - revertUUID, err := onlineDDL.GetRevertUUID() + revertUUID, err := onlineDDL.GetRevertUUID(e.env.SQLParser()) if err != nil { return fmt.Errorf("cannot run a revert migration %v: %+v", onlineDDL.UUID, err) } @@ -2736,7 +2736,7 @@ func (e *Executor) executeRevert(ctx context.Context, onlineDDL *schema.OnlineDD func (e *Executor) evaluateDeclarativeDiff(ctx context.Context, onlineDDL *schema.OnlineDDL) (diff schemadiff.EntityDiff, err error) { // Modify the CREATE TABLE statement to indicate a different, made up table name, known as the "comparison table" - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return nil, err } @@ -2793,9 +2793,9 @@ func (e *Executor) evaluateDeclarativeDiff(ctx context.Context, onlineDDL *schem hints := &schemadiff.DiffHints{AutoIncrementStrategy: schemadiff.AutoIncrementApplyHigher} switch ddlStmt.(type) { case *sqlparser.CreateTable: - diff, err = schemadiff.DiffCreateTablesQueries(existingShowCreateTable, newShowCreateTable, hints) + diff, err = schemadiff.DiffCreateTablesQueries(existingShowCreateTable, newShowCreateTable, hints, e.env.SQLParser()) case *sqlparser.CreateView: - diff, err = schemadiff.DiffCreateViewsQueries(existingShowCreateTable, newShowCreateTable, hints) + diff, err = schemadiff.DiffCreateViewsQueries(existingShowCreateTable, newShowCreateTable, hints, e.env.SQLParser()) default: return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "expected CREATE TABLE or CREATE VIEW in online DDL statement: %v", onlineDDL.SQL) } @@ -2856,7 +2856,7 @@ func (e *Executor) analyzeDropDDLActionMigration(ctx context.Context, onlineDDL } } } 
- stmt, err := sqlparser.ParseStrictDDL(originalShowCreateTable) + stmt, err := e.env.SQLParser().ParseStrictDDL(originalShowCreateTable) if err != nil { return err } @@ -2902,7 +2902,7 @@ func (e *Executor) executeDropDDLActionMigration(ctx context.Context, onlineDDL // We transform a DROP TABLE into a RENAME TABLE statement, so as to remove the table safely and asynchronously. - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return failMigration(err) } @@ -2945,7 +2945,7 @@ func (e *Executor) executeCreateDDLActionMigration(ctx context.Context, onlineDD e.migrationMutex.Lock() defer e.migrationMutex.Unlock() - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return failMigration(err) } @@ -3032,7 +3032,7 @@ func (e *Executor) executeAlterViewOnline(ctx context.Context, onlineDDL *schema if err != nil { return err } - stmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + stmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return err } @@ -3191,7 +3191,7 @@ func (e *Executor) executeAlterDDLActionMigration(ctx context.Context, onlineDDL failMigration := func(err error) error { return e.failMigration(ctx, onlineDDL, err) } - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return failMigration(err) } @@ -3264,7 +3264,7 @@ func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.Onlin return e.failMigration(ctx, onlineDDL, err) } - ddlAction, err := onlineDDL.GetAction() + ddlAction, err := onlineDDL.GetAction(e.env.SQLParser()) if err != nil { return failMigration(err) } @@ -3298,7 +3298,7 @@ func (e *Executor) executeMigration(ctx 
context.Context, onlineDDL *schema.Onlin // - Implicitly do nothing, if the table does not exist { // Sanity: reject IF NOT EXISTS statements, because they don't make sense (or are ambiguous) in declarative mode - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return failMigration(err) } @@ -3325,7 +3325,7 @@ func (e *Executor) executeMigration(ctx context.Context, onlineDDL *schema.Onlin // - Implicitly do nothing, if the table exists and is identical to CREATE statement // Sanity: reject IF NOT EXISTS statements, because they don't make sense (or are ambiguous) in declarative mode - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err != nil { return failMigration(err) } @@ -3466,7 +3466,7 @@ func (e *Executor) runNextMigration(ctx context.Context) error { } { // We strip out any VT query comments because our simplified parser doesn't work well with comments - ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL) + ddlStmt, _, err := schema.ParseOnlineDDLStatement(onlineDDL.SQL, e.env.SQLParser()) if err == nil { ddlStmt.SetComments(sqlparser.Comments{}) onlineDDL.SQL = sqlparser.String(ddlStmt) @@ -4862,7 +4862,7 @@ func (e *Executor) submittedMigrationConflictsWithPendingMigrationInSingletonCon return false } // Let's see if the pending migration is a revert: - if _, err := pendingOnlineDDL.GetRevertUUID(); err != nil { + if _, err := pendingOnlineDDL.GetRevertUUID(e.env.SQLParser()); err != nil { // Not a revert. So the pending migration definitely conflicts with our migration. 
return true } @@ -4997,13 +4997,13 @@ func (e *Executor) SubmitMigration( // OK, this is a new UUID - _, actionStr, err := onlineDDL.GetActionStr() + _, actionStr, err := onlineDDL.GetActionStr(e.env.SQLParser()) if err != nil { return nil, err } log.Infof("SubmitMigration: request to submit migration %s; action=%s, table=%s", onlineDDL.UUID, actionStr, onlineDDL.Table) - revertedUUID, _ := onlineDDL.GetRevertUUID() // Empty value if the migration is not actually a REVERT. Safe to ignore error. + revertedUUID, _ := onlineDDL.GetRevertUUID(e.env.SQLParser()) // Empty value if the migration is not actually a REVERT. Safe to ignore error. retainArtifactsSeconds := int64((retainOnlineDDLTables).Seconds()) if retainArtifacts, _ := onlineDDL.StrategySetting().RetainArtifactsDuration(); retainArtifacts != 0 { // Explicit retention indicated by `--retain-artifact` DDL strategy flag for this migration. Override! @@ -5029,7 +5029,7 @@ func (e *Executor) SubmitMigration( sqltypes.BoolBindVariable(onlineDDL.StrategySetting().IsPostponeCompletion()), sqltypes.BoolBindVariable(allowConcurrentMigration), sqltypes.StringBindVariable(revertedUUID), - sqltypes.BoolBindVariable(onlineDDL.IsView()), + sqltypes.BoolBindVariable(onlineDDL.IsView(e.env.SQLParser())), ) if err != nil { return nil, err diff --git a/go/vt/vttablet/onlineddl/executor_test.go b/go/vt/vttablet/onlineddl/executor_test.go index 9e100fa43eb..fac0cf7efcf 100644 --- a/go/vt/vttablet/onlineddl/executor_test.go +++ b/go/vt/vttablet/onlineddl/executor_test.go @@ -28,6 +28,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" ) @@ -44,7 +47,9 @@ func TestGetConstraintType(t *testing.T) { } func TestValidateAndEditCreateTableStatement(t *testing.T) { - e := Executor{} + e := Executor{ + env: tabletenv.NewEnv(nil, 
"ValidateAndEditCreateTableStatementTest", collations.MySQL8(), sqlparser.NewTestParser()), + } tt := []struct { name string query string @@ -156,7 +161,7 @@ func TestValidateAndEditCreateTableStatement(t *testing.T) { } for _, tc := range tt { t.Run(tc.name, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(tc.query) + stmt, err := e.env.SQLParser().ParseStrictDDL(tc.query) require.NoError(t, err) createTable, ok := stmt.(*sqlparser.CreateTable) require.True(t, ok) @@ -186,7 +191,9 @@ func TestValidateAndEditCreateTableStatement(t *testing.T) { } func TestValidateAndEditAlterTableStatement(t *testing.T) { - e := Executor{} + e := Executor{ + env: tabletenv.NewEnv(nil, "TestValidateAndEditAlterTableStatementTest", collations.MySQL8(), sqlparser.NewTestParser()), + } tt := []struct { alter string m map[string]string @@ -256,7 +263,7 @@ func TestValidateAndEditAlterTableStatement(t *testing.T) { } for _, tc := range tt { t.Run(tc.alter, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(tc.alter) + stmt, err := e.env.SQLParser().ParseStrictDDL(tc.alter) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) @@ -268,7 +275,7 @@ func TestValidateAndEditAlterTableStatement(t *testing.T) { onlineDDL := &schema.OnlineDDL{UUID: "a5a563da_dc1a_11ec_a416_0a43f95f28a3", Table: "t", Options: "--unsafe-allow-foreign-keys"} alters, err := e.validateAndEditAlterTableStatement(context.Background(), onlineDDL, alterTable, m) assert.NoError(t, err) - altersStrings := []string{} + var altersStrings []string for _, alter := range alters { altersStrings = append(altersStrings, sqlparser.String(alter)) } @@ -278,7 +285,9 @@ func TestValidateAndEditAlterTableStatement(t *testing.T) { } func TestAddInstantAlgorithm(t *testing.T) { - e := Executor{} + e := Executor{ + env: tabletenv.NewEnv(nil, "AddInstantAlgorithmTest", collations.MySQL8(), sqlparser.NewTestParser()), + } tt := []struct { alter string expect string @@ -302,7 +311,7 
@@ func TestAddInstantAlgorithm(t *testing.T) { } for _, tc := range tt { t.Run(tc.alter, func(t *testing.T) { - stmt, err := sqlparser.ParseStrictDDL(tc.alter) + stmt, err := e.env.SQLParser().ParseStrictDDL(tc.alter) require.NoError(t, err) alterTable, ok := stmt.(*sqlparser.AlterTable) require.True(t, ok) @@ -312,7 +321,7 @@ func TestAddInstantAlgorithm(t *testing.T) { assert.Equal(t, tc.expect, alterInstant) - stmt, err = sqlparser.ParseStrictDDL(alterInstant) + stmt, err = e.env.SQLParser().ParseStrictDDL(alterInstant) require.NoError(t, err) _, ok = stmt.(*sqlparser.AlterTable) require.True(t, ok) @@ -321,7 +330,9 @@ func TestAddInstantAlgorithm(t *testing.T) { } func TestDuplicateCreateTable(t *testing.T) { - e := Executor{} + e := Executor{ + env: tabletenv.NewEnv(nil, "DuplicateCreateTableTest", collations.MySQL8(), sqlparser.NewTestParser()), + } ctx := context.Background() onlineDDL := &schema.OnlineDDL{UUID: "a5a563da_dc1a_11ec_a416_0a43f95f28a3", Table: "something", Strategy: "vitess", Options: "--unsafe-allow-foreign-keys"} diff --git a/go/vt/vttablet/onlineddl/vrepl.go b/go/vt/vttablet/onlineddl/vrepl.go index 5cdb24ae5e4..1f9b422563d 100644 --- a/go/vt/vttablet/onlineddl/vrepl.go +++ b/go/vt/vttablet/onlineddl/vrepl.go @@ -136,6 +136,9 @@ type VRepl struct { parser *vrepl.AlterTableParser convertCharset map[string](*binlogdatapb.CharsetConversion) + + collationEnv *collations.Environment + sqlparser *sqlparser.Parser } // NewVRepl creates a VReplication handler for Online DDL @@ -149,6 +152,8 @@ func NewVRepl(workflow string, vreplShowCreateTable string, alterQuery string, analyzeTable bool, + collationEnv *collations.Environment, + parser *sqlparser.Parser, ) *VRepl { return &VRepl{ workflow: workflow, @@ -165,6 +170,8 @@ func NewVRepl(workflow string, enumToTextMap: map[string]string{}, intToEnumMap: map[string]bool{}, convertCharset: map[string](*binlogdatapb.CharsetConversion){}, + collationEnv: collationEnv, + sqlparser: parser, } } @@ -384,7 
+391,7 @@ func (v *VRepl) analyzeAlter(ctx context.Context) error { // Happens for REVERT return nil } - if err := v.parser.ParseAlterStatement(v.alterQuery); err != nil { + if err := v.parser.ParseAlterStatement(v.alterQuery, v.sqlparser); err != nil { return err } if v.parser.IsRenameTable() { @@ -455,7 +462,7 @@ func (v *VRepl) analyzeTables(ctx context.Context, conn *dbconnpool.DBConnection } v.addedUniqueKeys = vrepl.AddedUniqueKeys(sourceUniqueKeys, targetUniqueKeys, v.parser.ColumnRenameMap()) v.removedUniqueKeys = vrepl.RemovedUniqueKeys(sourceUniqueKeys, targetUniqueKeys, v.parser.ColumnRenameMap()) - v.removedForeignKeyNames, err = vrepl.RemovedForeignKeyNames(v.originalShowCreateTable, v.vreplShowCreateTable) + v.removedForeignKeyNames, err = vrepl.RemovedForeignKeyNames(v.sqlparser, v.originalShowCreateTable, v.vreplShowCreateTable) if err != nil { return err } @@ -553,11 +560,11 @@ func (v *VRepl) generateFilterQuery(ctx context.Context) error { case sourceCol.Type == vrepl.StringColumnType: // Check source and target charset/encoding. 
If needed, create // a binlogdatapb.CharsetConversion entry (later written to vreplication) - fromCollation := collations.Local().DefaultCollationForCharset(sourceCol.Charset) + fromCollation := v.collationEnv.DefaultCollationForCharset(sourceCol.Charset) if fromCollation == collations.Unknown { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", sourceCol.Charset, sourceCol.Name) } - toCollation := collations.Local().DefaultCollationForCharset(targetCol.Charset) + toCollation := v.collationEnv.DefaultCollationForCharset(targetCol.Charset) // Let's see if target col is at all textual if targetCol.Type == vrepl.StringColumnType && toCollation == collations.Unknown { return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", targetCol.Charset, targetCol.Name) diff --git a/go/vt/vttablet/onlineddl/vrepl/foreign_key.go b/go/vt/vttablet/onlineddl/vrepl/foreign_key.go index f0925594ec0..26a46879f79 100644 --- a/go/vt/vttablet/onlineddl/vrepl/foreign_key.go +++ b/go/vt/vttablet/onlineddl/vrepl/foreign_key.go @@ -27,6 +27,7 @@ import ( // RemovedForeignKeyNames returns the names of removed foreign keys, ignoring mere name changes func RemovedForeignKeyNames( + parser *sqlparser.Parser, originalCreateTable string, vreplCreateTable string, ) (names []string, err error) { @@ -34,7 +35,7 @@ func RemovedForeignKeyNames( return nil, nil } diffHints := schemadiff.DiffHints{ConstraintNamesStrategy: schemadiff.ConstraintNamesIgnoreAll} - diff, err := schemadiff.DiffCreateTablesQueries(originalCreateTable, vreplCreateTable, &diffHints) + diff, err := schemadiff.DiffCreateTablesQueries(originalCreateTable, vreplCreateTable, &diffHints, parser) if err != nil { return nil, err } diff --git a/go/vt/vttablet/onlineddl/vrepl/foreign_key_test.go b/go/vt/vttablet/onlineddl/vrepl/foreign_key_test.go index 619ba4847d9..7b8cf0e7363 100644 --- a/go/vt/vttablet/onlineddl/vrepl/foreign_key_test.go +++ 
b/go/vt/vttablet/onlineddl/vrepl/foreign_key_test.go @@ -24,6 +24,8 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/sqlparser" ) func TestRemovedForeignKeyNames(t *testing.T) { @@ -66,7 +68,7 @@ func TestRemovedForeignKeyNames(t *testing.T) { } for _, tcase := range tcases { t.Run(tcase.before, func(t *testing.T) { - names, err := RemovedForeignKeyNames(tcase.before, tcase.after) + names, err := RemovedForeignKeyNames(sqlparser.NewTestParser(), tcase.before, tcase.after) assert.NoError(t, err) assert.Equal(t, tcase.names, names) }) diff --git a/go/vt/vttablet/onlineddl/vrepl/parser.go b/go/vt/vttablet/onlineddl/vrepl/parser.go index f1f2f1378d8..b5648adeabe 100644 --- a/go/vt/vttablet/onlineddl/vrepl/parser.go +++ b/go/vt/vttablet/onlineddl/vrepl/parser.go @@ -78,8 +78,8 @@ func (p *AlterTableParser) analyzeAlter(alterTable *sqlparser.AlterTable) { } // ParseAlterStatement is the main function of th eparser, and parses an ALTER TABLE statement -func (p *AlterTableParser) ParseAlterStatement(alterQuery string) (err error) { - stmt, err := sqlparser.ParseStrictDDL(alterQuery) +func (p *AlterTableParser) ParseAlterStatement(alterQuery string, parser *sqlparser.Parser) (err error) { + stmt, err := parser.ParseStrictDDL(alterQuery) if err != nil { return err } diff --git a/go/vt/vttablet/onlineddl/vrepl/parser_test.go b/go/vt/vttablet/onlineddl/vrepl/parser_test.go index f849b1d741d..2a7031f3a98 100644 --- a/go/vt/vttablet/onlineddl/vrepl/parser_test.go +++ b/go/vt/vttablet/onlineddl/vrepl/parser_test.go @@ -24,12 +24,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/sqlparser" ) func TestParseAlterStatement(t *testing.T) { statement := "alter table t add column t int, engine=innodb" parser := NewAlterTableParser() - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.False(t, 
parser.HasNonTrivialRenames()) assert.False(t, parser.IsAutoIncrementDefined()) @@ -38,7 +40,7 @@ func TestParseAlterStatement(t *testing.T) { func TestParseAlterStatementTrivialRename(t *testing.T) { statement := "alter table t add column t int, change ts ts timestamp, engine=innodb" parser := NewAlterTableParser() - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.False(t, parser.HasNonTrivialRenames()) assert.False(t, parser.IsAutoIncrementDefined()) @@ -66,7 +68,7 @@ func TestParseAlterStatementWithAutoIncrement(t *testing.T) { for _, statement := range statements { parser := NewAlterTableParser() statement := "alter table t " + statement - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.True(t, parser.IsAutoIncrementDefined()) } @@ -75,7 +77,7 @@ func TestParseAlterStatementWithAutoIncrement(t *testing.T) { func TestParseAlterStatementTrivialRenames(t *testing.T) { statement := "alter table t add column t int, change ts ts timestamp, CHANGE f `f` float, engine=innodb" parser := NewAlterTableParser() - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.False(t, parser.HasNonTrivialRenames()) assert.False(t, parser.IsAutoIncrementDefined()) @@ -98,7 +100,7 @@ func TestParseAlterStatementNonTrivial(t *testing.T) { for _, statement := range statements { statement := "alter table t " + statement parser := NewAlterTableParser() - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.False(t, parser.IsAutoIncrementDefined()) renames := parser.GetNonTrivialRenames() @@ -113,7 +115,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) { { parser := NewAlterTableParser() 
statement := "alter table t drop column b" - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, len(parser.droppedColumns), 1) assert.True(t, parser.droppedColumns["b"]) @@ -121,7 +123,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) { { parser := NewAlterTableParser() statement := "alter table t drop column b, drop key c_idx, drop column `d`" - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, len(parser.droppedColumns), 2) assert.True(t, parser.droppedColumns["b"]) @@ -130,7 +132,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) { { parser := NewAlterTableParser() statement := "alter table t drop column b, drop key c_idx, drop column `d`, drop `e`, drop primary key, drop foreign key fk_1" - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, len(parser.droppedColumns), 3) assert.True(t, parser.droppedColumns["b"]) @@ -140,7 +142,7 @@ func TestParseAlterStatementDroppedColumns(t *testing.T) { { parser := NewAlterTableParser() statement := "alter table t drop column b, drop bad statement, add column i int" - err := parser.ParseAlterStatement(statement) + err := parser.ParseAlterStatement(statement, sqlparser.NewTestParser()) assert.Error(t, err) } } @@ -177,7 +179,7 @@ func TestParseAlterStatementRenameTable(t *testing.T) { for _, tc := range tt { t.Run(tc.alter, func(t *testing.T) { parser := NewAlterTableParser() - err := parser.ParseAlterStatement(tc.alter) + err := parser.ParseAlterStatement(tc.alter, sqlparser.NewTestParser()) assert.NoError(t, err) assert.Equal(t, tc.isRename, parser.isRenameTable) }) diff --git a/go/vt/vttablet/sandboxconn/sandboxconn.go b/go/vt/vttablet/sandboxconn/sandboxconn.go index 
55a635984ec..ad9c1b3702f 100644 --- a/go/vt/vttablet/sandboxconn/sandboxconn.go +++ b/go/vt/vttablet/sandboxconn/sandboxconn.go @@ -128,6 +128,8 @@ type SandboxConn struct { NotServing bool getSchemaResult []map[string]string + + parser *sqlparser.Parser } var _ queryservice.QueryService = (*SandboxConn)(nil) // compile-time interface check @@ -139,6 +141,7 @@ func NewSandboxConn(t *topodatapb.Tablet) *SandboxConn { MustFailCodes: make(map[vtrpcpb.Code]int), MustFailExecute: make(map[sqlparser.StatementType]int), txIDToRID: make(map[int64]int64), + parser: sqlparser.NewTestParser(), } } @@ -225,7 +228,7 @@ func (sbc *SandboxConn) Execute(ctx context.Context, target *querypb.Target, que return nil, err } - stmt, _ := sqlparser.Parse(query) // knowingly ignoring the error + stmt, _ := sbc.parser.Parse(query) // knowingly ignoring the error if sbc.MustFailExecute[sqlparser.ASTToStatementType(stmt)] > 0 { sbc.MustFailExecute[sqlparser.ASTToStatementType(stmt)] = sbc.MustFailExecute[sqlparser.ASTToStatementType(stmt)] - 1 return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "failed query: %v", query) @@ -251,7 +254,7 @@ func (sbc *SandboxConn) StreamExecute(ctx context.Context, target *querypb.Targe sbc.sExecMu.Unlock() return err } - parse, _ := sqlparser.Parse(query) + parse, _ := sbc.parser.Parse(query) if sbc.results == nil { nextRs := sbc.getNextResult(parse) diff --git a/go/vt/vttablet/tabletmanager/restore.go b/go/vt/vttablet/tabletmanager/restore.go index e606e71a267..22d0abbc9e5 100644 --- a/go/vt/vttablet/tabletmanager/restore.go +++ b/go/vt/vttablet/tabletmanager/restore.go @@ -426,7 +426,7 @@ func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos replicati Port: connParams.Port, } dbCfgs.SetDbParams(*connParams, *connParams, *connParams) - vsClient := vreplication.NewReplicaConnector(connParams) + vsClient := vreplication.NewReplicaConnector(connParams, tm.CollationEnv, tm.SQLParser) filter := &binlogdatapb.Filter{ Rules: 
[]*binlogdatapb.Rule{{ @@ -477,7 +477,7 @@ func (tm *TabletManager) getGTIDFromTimestamp(ctx context.Context, pos replicati gtidsChan <- []string{"", ""} } }() - defer vsClient.Close(ctx) + defer vsClient.Close() select { case val := <-gtidsChan: return val[0], val[1], nil diff --git a/go/vt/vttablet/tabletmanager/rpc_query.go b/go/vt/vttablet/tabletmanager/rpc_query.go index 8b8ac605893..4a2da2bf310 100644 --- a/go/vt/vttablet/tabletmanager/rpc_query.go +++ b/go/vt/vttablet/tabletmanager/rpc_query.go @@ -54,7 +54,7 @@ func (tm *TabletManager) ExecuteFetchAsDba(ctx context.Context, req *tabletmanag // Handle special possible directives var directives *sqlparser.CommentDirectives - if stmt, err := sqlparser.Parse(string(req.Query)); err == nil { + if stmt, err := tm.SQLParser.Parse(string(req.Query)); err == nil { if cmnt, ok := stmt.(sqlparser.Commented); ok { directives = cmnt.GetParsedComments().Directives() } @@ -66,7 +66,7 @@ func (tm *TabletManager) ExecuteFetchAsDba(ctx context.Context, req *tabletmanag } // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.SQLParser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -107,7 +107,7 @@ func (tm *TabletManager) ExecuteFetchAsAllPrivs(ctx context.Context, req *tablet } // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.SQLParser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -131,7 +131,7 @@ func (tm *TabletManager) ExecuteFetchAsApp(ctx context.Context, req *tabletmanag } defer conn.Recycle() // Replace any provided sidecar database qualifiers with the correct one. 
- uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.SQLParser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } @@ -145,7 +145,7 @@ func (tm *TabletManager) ExecuteQuery(ctx context.Context, req *tabletmanagerdat tablet := tm.Tablet() target := &querypb.Target{Keyspace: tablet.Keyspace, Shard: tablet.Shard, TabletType: tablet.Type} // Replace any provided sidecar database qualifiers with the correct one. - uq, err := sqlparser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.SQLParser.ReplaceTableQualifiers(string(req.Query), sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication.go b/go/vt/vttablet/tabletmanager/rpc_vreplication.go index d81d2a6e6a4..60881b4eab3 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication.go @@ -329,7 +329,7 @@ func (tm *TabletManager) UpdateVReplicationWorkflow(ctx context.Context, req *ta // VReplicationExec executes a vreplication command. func (tm *TabletManager) VReplicationExec(ctx context.Context, query string) (*querypb.QueryResult, error) { // Replace any provided sidecar database qualifiers with the correct one. 
- uq, err := sqlparser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) + uq, err := tm.SQLParser.ReplaceTableQualifiers(query, sidecar.DefaultName, sidecar.GetName()) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go index a70220a68fc..d062183e8c5 100644 --- a/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go +++ b/go/vt/vttablet/tabletmanager/rpc_vreplication_test.go @@ -111,7 +111,7 @@ func TestCreateVReplicationWorkflow(t *testing.T) { targetTablet := tenv.addTablet(t, targetTabletUID, targetKs, shard) defer tenv.deleteTablet(targetTablet.tablet) - ws := workflow.NewServer(tenv.ts, tenv.tmc) + ws := workflow.NewServer(tenv.ts, tenv.tmc, sqlparser.NewTestParser()) tests := []struct { name string @@ -268,7 +268,7 @@ func TestMoveTables(t *testing.T) { }, }) - ws := workflow.NewServer(tenv.ts, tenv.tmc) + ws := workflow.NewServer(tenv.ts, tenv.tmc, sqlparser.NewTestParser()) tenv.mysqld.Schema = defaultSchema tenv.mysqld.Schema.DatabaseSchema = tenv.dbName @@ -656,7 +656,7 @@ func TestSourceShardSelection(t *testing.T) { defer tenv.deleteTablet(tt.tablet) } - ws := workflow.NewServer(tenv.ts, tenv.tmc) + ws := workflow.NewServer(tenv.ts, tenv.tmc, sqlparser.NewTestParser()) tenv.ts.SaveVSchema(ctx, sourceKs, &vschemapb.Keyspace{ Sharded: true, @@ -855,7 +855,7 @@ func TestFailedMoveTablesCreateCleanup(t *testing.T) { sourceKs, shard, table, table) tenv := newTestEnv(t, ctx, sourceKs, []string{shard}) defer tenv.close() - ws := workflow.NewServer(tenv.ts, tenv.tmc) + ws := workflow.NewServer(tenv.ts, tenv.tmc, sqlparser.NewTestParser()) sourceTablet := tenv.addTablet(t, sourceTabletUID, sourceKs, shard) defer tenv.deleteTablet(sourceTablet.tablet) diff --git a/go/vt/vttablet/tabletmanager/tm_init.go b/go/vt/vttablet/tabletmanager/tm_init.go index d65115990f1..1910050e802 100644 --- a/go/vt/vttablet/tabletmanager/tm_init.go +++ 
b/go/vt/vttablet/tabletmanager/tm_init.go @@ -63,6 +63,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" @@ -155,6 +156,8 @@ type TabletManager struct { UpdateStream binlog.UpdateStreamControl VREngine *vreplication.Engine VDiffEngine *vdiff.Engine + CollationEnv *collations.Environment + SQLParser *sqlparser.Parser // tmState manages the TabletManager state. tmState *tmState @@ -206,7 +209,7 @@ type TabletManager struct { } // BuildTabletFromInput builds a tablet record from input parameters. -func BuildTabletFromInput(alias *topodatapb.TabletAlias, port, grpcPort int32, db *dbconfigs.DBConfigs) (*topodatapb.Tablet, error) { +func BuildTabletFromInput(alias *topodatapb.TabletAlias, port, grpcPort int32, db *dbconfigs.DBConfigs, collationEnv *collations.Environment) (*topodatapb.Tablet, error) { hostname := tabletHostname if hostname == "" { var err error @@ -244,14 +247,14 @@ func BuildTabletFromInput(alias *topodatapb.TabletAlias, port, grpcPort int32, d return nil, err } - var charset uint8 + var charset collations.ID if db != nil && db.Charset != "" { - charset, err = collations.Local().ParseConnectionCharset(db.Charset) + charset, err = collationEnv.ParseConnectionCharset(db.Charset) if err != nil { return nil, err } } else { - charset = collations.Local().DefaultConnectionCharset() + charset = collationEnv.DefaultConnectionCharset() } return &topodatapb.Tablet{ diff --git a/go/vt/vttablet/tabletmanager/tm_init_test.go b/go/vt/vttablet/tabletmanager/tm_init_test.go index 16dddba7dfd..b0ab9b9a1e2 100644 --- a/go/vt/vttablet/tabletmanager/tm_init_test.go +++ b/go/vt/vttablet/tabletmanager/tm_init_test.go @@ -71,16 +71,16 @@ func TestStartBuildTabletFromInput(t *testing.T) { Type: topodatapb.TabletType_REPLICA, Tags: 
map[string]string{}, DbNameOverride: "aa", - DefaultConnCollation: uint32(collations.Default()), + DefaultConnCollation: uint32(collations.MySQL8().DefaultConnectionCharset()), } - gotTablet, err := BuildTabletFromInput(alias, port, grpcport, nil) + gotTablet, err := BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) require.NoError(t, err) // Hostname should be resolved. assert.Equal(t, wantTablet, gotTablet) tabletHostname = "" - gotTablet, err = BuildTabletFromInput(alias, port, grpcport, nil) + gotTablet, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) require.NoError(t, err) assert.NotEqual(t, "", gotTablet.Hostname) @@ -92,7 +92,7 @@ func TestStartBuildTabletFromInput(t *testing.T) { Start: []byte(""), End: []byte("\xc0"), } - gotTablet, err = BuildTabletFromInput(alias, port, grpcport, nil) + gotTablet, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) require.NoError(t, err) // KeyRange check is explicit because the next comparison doesn't // show the diff well enough. @@ -102,25 +102,25 @@ func TestStartBuildTabletFromInput(t *testing.T) { // Invalid inputs. 
initKeyspace = "" initShard = "0" - _, err = BuildTabletFromInput(alias, port, grpcport, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) assert.Contains(t, err.Error(), "init_keyspace and init_shard must be specified") initKeyspace = "test_keyspace" initShard = "" - _, err = BuildTabletFromInput(alias, port, grpcport, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) assert.Contains(t, err.Error(), "init_keyspace and init_shard must be specified") initShard = "x-y" - _, err = BuildTabletFromInput(alias, port, grpcport, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) assert.Contains(t, err.Error(), "cannot validate shard name") initShard = "0" initTabletType = "bad" - _, err = BuildTabletFromInput(alias, port, grpcport, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) assert.Contains(t, err.Error(), "unknown TabletType bad") initTabletType = "primary" - _, err = BuildTabletFromInput(alias, port, grpcport, nil) + _, err = BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) assert.Contains(t, err.Error(), "invalid init_tablet_type PRIMARY") } @@ -153,10 +153,10 @@ func TestBuildTabletFromInputWithBuildTags(t *testing.T) { Type: topodatapb.TabletType_REPLICA, Tags: servenv.AppVersion.ToStringMap(), DbNameOverride: "aa", - DefaultConnCollation: uint32(collations.Default()), + DefaultConnCollation: uint32(collations.MySQL8().DefaultConnectionCharset()), } - gotTablet, err := BuildTabletFromInput(alias, port, grpcport, nil) + gotTablet, err := BuildTabletFromInput(alias, port, grpcport, nil, collations.MySQL8()) require.NoError(t, err) assert.Equal(t, wantTablet, gotTablet) } diff --git a/go/vt/vttablet/tabletmanager/vdiff/controller.go b/go/vt/vttablet/tabletmanager/vdiff/controller.go index 9dc2660061e..1c50c0597ef 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/controller.go +++ 
b/go/vt/vttablet/tabletmanager/vdiff/controller.go @@ -238,7 +238,7 @@ func (ct *controller) start(ctx context.Context, dbClient binlogplayer.DBClient) return err } - wd, err := newWorkflowDiffer(ct, ct.options) + wd, err := newWorkflowDiffer(ct, ct.options, ct.vde.collationEnv) if err != nil { return err } diff --git a/go/vt/vttablet/tabletmanager/vdiff/engine.go b/go/vt/vttablet/tabletmanager/vdiff/engine.go index 1ccf3dc80e6..16e8a89d90e 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/engine.go +++ b/go/vt/vttablet/tabletmanager/vdiff/engine.go @@ -24,6 +24,7 @@ import ( "sync" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/proto/topodata" @@ -36,7 +37,6 @@ import ( "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/topo" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) type Engine struct { @@ -69,14 +69,19 @@ type Engine struct { // modified behavior for that env, e.g. not starting the retry goroutine. This should // NOT be set in production. 
fortests bool + + collationEnv *collations.Environment + parser *sqlparser.Parser } -func NewEngine(config *tabletenv.TabletConfig, ts *topo.Server, tablet *topodata.Tablet) *Engine { +func NewEngine(ts *topo.Server, tablet *topodata.Tablet, collationEnv *collations.Environment, parser *sqlparser.Parser) *Engine { vde := &Engine{ controllers: make(map[int64]*controller), ts: ts, thisTablet: tablet, tmClientFactory: func() tmclient.TabletManagerClient { return tmclient.NewTabletManagerClient() }, + collationEnv: collationEnv, + parser: parser, } return vde } @@ -94,6 +99,8 @@ func NewTestEngine(ts *topo.Server, tablet *topodata.Tablet, dbn string, dbcf fu dbClientFactoryDba: dbcf, tmClientFactory: tmcf, fortests: true, + collationEnv: collations.MySQL8(), + parser: sqlparser.NewTestParser(), } return vde } @@ -104,10 +111,10 @@ func (vde *Engine) InitDBConfig(dbcfgs *dbconfigs.DBConfigs) { return } vde.dbClientFactoryFiltered = func() binlogplayer.DBClient { - return binlogplayer.NewDBClient(dbcfgs.FilteredWithDB()) + return binlogplayer.NewDBClient(dbcfgs.FilteredWithDB(), vde.parser) } vde.dbClientFactoryDba = func() binlogplayer.DBClient { - return binlogplayer.NewDBClient(dbcfgs.DbaWithDB()) + return binlogplayer.NewDBClient(dbcfgs.DbaWithDB(), vde.parser) } vde.dbName = dbcfgs.DBName } diff --git a/go/vt/vttablet/tabletmanager/vdiff/engine_test.go b/go/vt/vttablet/tabletmanager/vdiff/engine_test.go index 0aedeec415b..ca548a9a478 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/engine_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/engine_test.go @@ -27,7 +27,6 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" diff --git a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go index d0b81179f0f..0676c5204be 100644 --- 
a/go/vt/vttablet/tabletmanager/vdiff/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/framework_test.go @@ -100,16 +100,26 @@ var ( Columns: []string{"id", "dt"}, PrimaryKeyColumns: []string{"id"}, Fields: sqltypes.MakeTestFields("id|dt", "int64|datetime"), + }, { + Name: "nopk", + Columns: []string{"c1", "c2", "c3"}, + Fields: sqltypes.MakeTestFields("c1|c2|c3", "int64|int64|int64"), + }, { + Name: "nopkwithpke", + Columns: []string{"c1", "c2", "c3"}, + Fields: sqltypes.MakeTestFields("c1|c2|c3", "int64|int64|int64"), }, }, } tableDefMap = map[string]int{ - "t1": 0, - "nonpktext": 1, - "pktext": 2, - "multipk": 3, - "aggr": 4, - "datze": 5, + "t1": 0, + "nonpktext": 1, + "pktext": 2, + "multipk": 3, + "aggr": 4, + "datze": 5, + "nopk": 6, + "nopkwithpke": 7, } ) @@ -397,7 +407,7 @@ func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resu } func (dbc *realDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { - queries, err := sqlparser.SplitStatementToPieces(query) + queries, err := sqlparser.NewTestParser().SplitStatementToPieces(query) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vdiff/report.go b/go/vt/vttablet/tabletmanager/vdiff/report.go index f61929ea32c..62ce6d24585 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/report.go +++ b/go/vt/vttablet/tabletmanager/vdiff/report.go @@ -66,7 +66,7 @@ type RowDiff struct { func (td *tableDiffer) genRowDiff(queryStmt string, row []sqltypes.Value, debug, onlyPks bool) (*RowDiff, error) { drp := &RowDiff{} drp.Row = make(map[string]string) - statement, err := sqlparser.Parse(queryStmt) + statement, err := td.wd.ct.vde.parser.Parse(queryStmt) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vdiff/table_differ.go b/go/vt/vttablet/tabletmanager/vdiff/table_differ.go index 83244810b12..d658fea2a25 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/table_differ.go +++ 
b/go/vt/vttablet/tabletmanager/vdiff/table_differ.go @@ -447,20 +447,21 @@ func (td *tableDiffer) setupRowSorters() { for shard, source := range td.wd.ct.sources { sources[shard] = source.shardStreamer } - td.sourcePrimitive = newMergeSorter(sources, td.tablePlan.comparePKs) + td.sourcePrimitive = newMergeSorter(sources, td.tablePlan.comparePKs, td.wd.collationEnv) // Create a merge sorter for the target. targets := make(map[string]*shardStreamer) targets[td.wd.ct.targetShardStreamer.shard] = td.wd.ct.targetShardStreamer - td.targetPrimitive = newMergeSorter(targets, td.tablePlan.comparePKs) + td.targetPrimitive = newMergeSorter(targets, td.tablePlan.comparePKs, td.wd.collationEnv) // If there were aggregate expressions, we have to re-aggregate // the results, which engine.OrderedAggregate can do. if len(td.tablePlan.aggregates) != 0 { td.sourcePrimitive = &engine.OrderedAggregate{ - Aggregates: td.tablePlan.aggregates, - GroupByKeys: pkColsToGroupByParams(td.tablePlan.pkCols), - Input: td.sourcePrimitive, + Aggregates: td.tablePlan.aggregates, + GroupByKeys: pkColsToGroupByParams(td.tablePlan.pkCols, td.wd.collationEnv), + Input: td.sourcePrimitive, + CollationEnv: td.wd.collationEnv, } } } @@ -678,7 +679,7 @@ func (td *tableDiffer) compare(sourceRow, targetRow []sqltypes.Value, cols []com if collationID == collations.Unknown { collationID = collations.CollationBinaryID } - c, err = evalengine.NullsafeCompare(sourceRow[compareIndex], targetRow[compareIndex], collationID) + c, err = evalengine.NullsafeCompare(sourceRow[compareIndex], targetRow[compareIndex], td.wd.collationEnv, collationID) if err != nil { return 0, err } diff --git a/go/vt/vttablet/tabletmanager/vdiff/table_plan.go b/go/vt/vttablet/tabletmanager/vdiff/table_plan.go index 2efa9692c97..548f902e9ac 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/table_plan.go +++ b/go/vt/vttablet/tabletmanager/vdiff/table_plan.go @@ -17,21 +17,23 @@ limitations under the License. 
package vdiff import ( + "context" "fmt" "strings" "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" - tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/log" - querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/engine" "vitess.io/vitess/go/vt/vtgate/engine/opcode" + + querypb "vitess.io/vitess/go/vt/proto/query" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) const sqlSelectColumnCollations = "select column_name as column_name, collation_name as collation_name from information_schema.columns where table_schema=%a and table_name=%a and column_name in %a" @@ -59,12 +61,12 @@ type tablePlan struct { aggregates []*engine.AggregateParams } -func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName string) (*tablePlan, error) { +func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName string, collationEnv *collations.Environment) (*tablePlan, error) { tp := &tablePlan{ table: td.table, dbName: dbName, } - statement, err := sqlparser.Parse(td.sourceQuery) + statement, err := td.wd.ct.vde.parser.Parse(td.sourceQuery) if err != nil { return nil, err } @@ -75,7 +77,7 @@ func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName str sourceSelect := &sqlparser.Select{} targetSelect := &sqlparser.Select{} - // aggregates is the list of Aggregate functions, if any. + // Aggregates is the list of Aggregate functions, if any. 
var aggregates []*engine.AggregateParams for _, selExpr := range sel.SelectExprs { switch selExpr := selExpr.(type) { @@ -112,7 +114,8 @@ func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName str aggregates = append(aggregates, engine.NewAggregateParam( /*opcode*/ opcode.AggregateSum, /*offset*/ len(sourceSelect.SelectExprs)-1, - /*alias*/ "")) + /*alias*/ "", collationEnv), + ) } } default: @@ -152,10 +155,25 @@ func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName str }, } - err = tp.findPKs(dbClient, targetSelect) + if len(tp.table.PrimaryKeyColumns) == 0 { + // We use the columns from a PKE if there is one. + pkeCols, err := tp.getPKEquivalentColumns(dbClient) + if err != nil { + return nil, vterrors.Wrapf(err, "error getting PK equivalent columns for table %s", tp.table.Name) + } + if len(pkeCols) > 0 { + tp.table.PrimaryKeyColumns = append(tp.table.PrimaryKeyColumns, pkeCols...) + } else { + // We use every column together as a substitute PK. + tp.table.PrimaryKeyColumns = append(tp.table.PrimaryKeyColumns, tp.table.Columns...) + } + } + + err = tp.findPKs(dbClient, targetSelect, collationEnv) if err != nil { return nil, err } + // Remove in_keyrange. It's not understood by mysql. sourceSelect.Where = sel.Where // removeKeyrange(sel.Where) // The source should also perform the group by. @@ -176,7 +194,10 @@ func (td *tableDiffer) buildTablePlan(dbClient binlogplayer.DBClient, dbName str } // findPKs identifies PKs and removes them from the columns to do data comparison. 
-func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlparser.Select) error { +func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlparser.Select, collationEnv *collations.Environment) error { + if len(tp.table.PrimaryKeyColumns) == 0 { + return nil + } var orderby sqlparser.OrderBy for _, pk := range tp.table.PrimaryKeyColumns { found := false @@ -195,7 +216,7 @@ func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlpa tp.compareCols[i].isPK = true tp.comparePKs = append(tp.comparePKs, tp.compareCols[i]) tp.selectPks = append(tp.selectPks, i) - // We'll be comparing pks separately. So, remove them from compareCols. + // We'll be comparing PKs separately. So, remove them from compareCols. tp.pkCols = append(tp.pkCols, i) found = true break @@ -210,7 +231,7 @@ func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlpa Direction: sqlparser.AscOrder, }) } - if err := tp.getPKColumnCollations(dbClient); err != nil { + if err := tp.getPKColumnCollations(dbClient, collationEnv); err != nil { return vterrors.Wrapf(err, "error getting PK column collations for table %s", tp.table.Name) } tp.orderBy = orderby @@ -222,7 +243,10 @@ func (tp *tablePlan) findPKs(dbClient binlogplayer.DBClient, targetSelect *sqlpa // sorting when we do the merge sort and for the comparisons. It then // saves the collations in the tablePlan's comparePKs column info // structs for those subsequent operations. 
-func (tp *tablePlan) getPKColumnCollations(dbClient binlogplayer.DBClient) error { +func (tp *tablePlan) getPKColumnCollations(dbClient binlogplayer.DBClient, collationEnv *collations.Environment) error { + if len(tp.comparePKs) == 0 { + return nil + } columnList := make([]string, len(tp.comparePKs)) for i := range tp.comparePKs { columnList[i] = tp.comparePKs[i].colName @@ -246,7 +270,6 @@ func (tp *tablePlan) getPKColumnCollations(dbClient binlogplayer.DBClient) error if qr == nil || len(qr.Rows) != len(tp.comparePKs) { return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG] unexpected result for query %s: %+v", query, qr) } - collationEnv := collations.Local() for _, row := range qr.Named().Rows { columnName := row["column_name"].ToString() collateName := strings.ToLower(row["collation_name"].ToString()) @@ -259,3 +282,17 @@ func (tp *tablePlan) getPKColumnCollations(dbClient binlogplayer.DBClient) error } return nil } + +func (tp *tablePlan) getPKEquivalentColumns(dbClient binlogplayer.DBClient) ([]string, error) { + ctx, cancel := context.WithTimeout(context.Background(), BackgroundOperationTimeout/2) + defer cancel() + executeFetch := func(query string, maxrows int, wantfields bool) (*sqltypes.Result, error) { + // This sets wantfields to true. 
+ return dbClient.ExecuteFetch(query, maxrows) + } + pkeCols, _, err := mysqlctl.GetPrimaryKeyEquivalentColumns(ctx, executeFetch, tp.dbName, tp.table.Name) + if err != nil { + return nil, err + } + return pkeCols, nil +} diff --git a/go/vt/vttablet/tabletmanager/vdiff/utils.go b/go/vt/vttablet/tabletmanager/vdiff/utils.go index dc11dbf249c..07e070976a9 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/utils.go +++ b/go/vt/vttablet/tabletmanager/vdiff/utils.go @@ -33,7 +33,7 @@ import ( ) // newMergeSorter creates an engine.MergeSort based on the shard streamers and pk columns -func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compareColInfo) *engine.MergeSort { +func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compareColInfo, collationEnv *collations.Environment) *engine.MergeSort { prims := make([]engine.StreamExecutor, 0, len(participants)) for _, participant := range participants { prims = append(prims, participant) @@ -46,7 +46,7 @@ func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compare if cpk.collation != collations.Unknown { collation = cpk.collation } - ob[i] = evalengine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: evalengine.NewType(sqltypes.Unknown, collation)} + ob[i] = evalengine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: evalengine.NewType(sqltypes.Unknown, collation), CollationEnv: collationEnv} } return &engine.MergeSort{ Primitives: prims, @@ -63,10 +63,10 @@ func encodeString(in string) string { return buf.String() } -func pkColsToGroupByParams(pkCols []int) []*engine.GroupByParams { +func pkColsToGroupByParams(pkCols []int, collationEnv *collations.Environment) []*engine.GroupByParams { var res []*engine.GroupByParams for _, col := range pkCols { - res = append(res, &engine.GroupByParams{KeyCol: col, WeightStringCol: -1}) + res = append(res, &engine.GroupByParams{KeyCol: col, WeightStringCol: -1, CollationEnv: 
collationEnv}) } return res } diff --git a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go index 8f45d2cd0fd..703895e8dda 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go +++ b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ.go @@ -26,21 +26,20 @@ import ( "google.golang.org/protobuf/encoding/prototext" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" - "vitess.io/vitess/go/vt/vtgate/vindexes" - - "vitess.io/vitess/go/vt/schema" - "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtctl/schematools" "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" ) @@ -51,13 +50,16 @@ type workflowDiffer struct { tableDiffers map[string]*tableDiffer // key is table name opts *tabletmanagerdatapb.VDiffOptions + + collationEnv *collations.Environment } -func newWorkflowDiffer(ct *controller, opts *tabletmanagerdatapb.VDiffOptions) (*workflowDiffer, error) { +func newWorkflowDiffer(ct *controller, opts *tabletmanagerdatapb.VDiffOptions, collationEnv *collations.Environment) (*workflowDiffer, error) { wd := &workflowDiffer{ ct: ct, opts: opts, tableDiffers: make(map[string]*tableDiffer, 1), + collationEnv: collationEnv, } return wd, nil } @@ -354,7 +356,7 @@ func (wd *workflowDiffer) buildPlan(dbClient binlogplayer.DBClient, filter *binl } td.lastPK = lastpkpb wd.tableDiffers[table.Name] = td - if _, err := td.buildTablePlan(dbClient, wd.ct.vde.dbName); err != nil { + if _, err := td.buildTablePlan(dbClient, wd.ct.vde.dbName, 
wd.collationEnv); err != nil { return err } } diff --git a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go index 10c6406f046..a460b87a4f6 100644 --- a/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go +++ b/go/vt/vttablet/tabletmanager/vdiff/workflow_differ_test.go @@ -67,8 +67,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -87,8 +87,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, 
collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -107,8 +107,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -127,8 +127,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c2, c1 from t1 order by c1 asc", targetQuery: "select c2, c1 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, - comparePKs: []compareColInfo{{1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + comparePKs: []compareColInfo{{1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{1}, selectPks: []int{1}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -147,8 +147,8 @@ func TestBuildPlanSuccess(t *testing.T) { 
table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c0 as c1, c2 from t2 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -157,7 +157,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // non-pk text column. + // Non-PK text column. input: &binlogdatapb.Rule{ Match: "nonpktext", Filter: "select c1, textcol from nonpktext", @@ -168,8 +168,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["nonpktext"]], sourceQuery: "select c1, textcol from nonpktext order by c1 asc", targetQuery: "select c1, textcol from nonpktext order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "textcol"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "textcol"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -178,7 +178,7 @@ 
func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // non-pk text column, different order. + // Non-PK text column, different order. input: &binlogdatapb.Rule{ Match: "nonpktext", Filter: "select textcol, c1 from nonpktext", @@ -189,8 +189,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["nonpktext"]], sourceQuery: "select textcol, c1 from nonpktext order by c1 asc", targetQuery: "select textcol, c1 from nonpktext order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), false, "textcol"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, - comparePKs: []compareColInfo{{1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "textcol"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + comparePKs: []compareColInfo{{1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{1}, selectPks: []int{1}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -199,7 +199,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // pk text column. + // PK text column. 
input: &binlogdatapb.Rule{ Match: "pktext", Filter: "select textcol, c2 from pktext", @@ -210,8 +210,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["pktext"]], sourceQuery: "select textcol, c2 from pktext order by textcol asc", targetQuery: "select textcol, c2 from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "textcol"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -220,7 +220,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // pk text column, different order. + // PK text column, different order. 
input: &binlogdatapb.Rule{ Match: "pktext", Filter: "select c2, textcol from pktext", @@ -231,8 +231,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["pktext"]], sourceQuery: "select c2, textcol from pktext order by textcol asc", targetQuery: "select c2, textcol from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, - comparePKs: []compareColInfo{{1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, + comparePKs: []compareColInfo{{1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, pkCols: []int{1}, selectPks: []int{1}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -241,7 +241,61 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // text column as expression. + // No PK. Use all columns as a substitute. 
+ input: &binlogdatapb.Rule{ + Match: "nopk", + Filter: "select * from nopk", + }, + table: "nopk", + tablePlan: &tablePlan{ + dbName: vdiffDBName, + table: testSchema.TableDefinitions[tableDefMap["nopk"]], + sourceQuery: "select c1, c2, c3 from nopk order by c1 asc, c2 asc, c3 asc", + targetQuery: "select c1, c2, c3 from nopk order by c1 asc, c2 asc, c3 asc", + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c2"}, {2, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c3"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c2"}, {2, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c3"}}, + pkCols: []int{0, 1, 2}, + selectPks: []int{0, 1, 2}, + orderBy: sqlparser.OrderBy{ + &sqlparser.Order{ + Expr: &sqlparser.ColName{Name: sqlparser.NewIdentifierCI("c1")}, + Direction: sqlparser.AscOrder, + }, + &sqlparser.Order{ + Expr: &sqlparser.ColName{Name: sqlparser.NewIdentifierCI("c2")}, + Direction: sqlparser.AscOrder, + }, + &sqlparser.Order{ + Expr: &sqlparser.ColName{Name: sqlparser.NewIdentifierCI("c3")}, + Direction: sqlparser.AscOrder, + }, + }, + }, + }, { + // No PK, but a PKE on c3. 
+ input: &binlogdatapb.Rule{ + Match: "nopkwithpke", + Filter: "select * from nopkwithpke", + }, + table: "nopkwithpke", + tablePlan: &tablePlan{ + dbName: vdiffDBName, + table: testSchema.TableDefinitions[tableDefMap["nopkwithpke"]], + sourceQuery: "select c1, c2, c3 from nopkwithpke order by c3 asc", + targetQuery: "select c1, c2, c3 from nopkwithpke order by c3 asc", + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}, {2, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c3"}}, + comparePKs: []compareColInfo{{2, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c3"}}, + pkCols: []int{2}, + selectPks: []int{2}, + orderBy: sqlparser.OrderBy{ + &sqlparser.Order{ + Expr: &sqlparser.ColName{Name: sqlparser.NewIdentifierCI("c3")}, + Direction: sqlparser.AscOrder, + }, + }, + }, + }, { + // Text column as expression. input: &binlogdatapb.Rule{ Match: "pktext", Filter: "select c2, a+b as textcol from pktext", @@ -252,8 +306,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["pktext"]], sourceQuery: "select c2, a + b as textcol from pktext order by textcol asc", targetQuery: "select c2, textcol from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, - comparePKs: []compareColInfo{{1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, + comparePKs: []compareColInfo{{1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "textcol"}}, pkCols: []int{1}, selectPks: []int{1}, orderBy: 
sqlparser.OrderBy{&sqlparser.Order{ @@ -262,7 +316,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // multiple pk columns. + // Multiple PK columns. input: &binlogdatapb.Rule{ Match: "multipk", }, @@ -272,8 +326,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["multipk"]], sourceQuery: "select c1, c2 from multipk order by c1 asc, c2 asc", targetQuery: "select c1, c2 from multipk order by c1 asc, c2 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c2"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c2"}}, pkCols: []int{0, 1}, selectPks: []int{0, 1}, orderBy: sqlparser.OrderBy{ @@ -299,8 +353,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + 
comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -321,8 +375,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where c2 = 2 and in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -343,8 +397,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where in_keyrange('-80') and c2 = 2 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, 
orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -365,8 +419,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where c2 = 2 and c1 = 1 and in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -387,8 +441,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 where c2 = 2 and in_keyrange('-80') order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -397,7 +451,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // group by + // 
Group by. input: &binlogdatapb.Rule{ Match: "t1", Filter: "select * from t1 group by c1", @@ -408,8 +462,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["t1"]], sourceQuery: "select c1, c2 from t1 group by c1 order by c1 asc", targetQuery: "select c1, c2 from t1 order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -418,7 +472,7 @@ func TestBuildPlanSuccess(t *testing.T) { }}, }, }, { - // aggregations + // Aggregations. 
input: &binlogdatapb.Rule{ Match: "aggr", Filter: "select c1, c2, count(*) as c3, sum(c4) as c4 from t1 group by c1", @@ -429,8 +483,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["aggr"]], sourceQuery: "select c1, c2, count(*) as c3, sum(c4) as c4 from t1 group by c1 order by c1 asc", targetQuery: "select c1, c2, c3, c4 from aggr order by c1 asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c2"}, {2, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c3"}, {3, collations.Local().LookupByName(sqltypes.NULL.String()), false, "c4"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "c1"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c2"}, {2, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c3"}, {3, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "c4"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "c1"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -438,12 +492,12 @@ func TestBuildPlanSuccess(t *testing.T) { Direction: sqlparser.AscOrder, }}, aggregates: []*engine.AggregateParams{ - engine.NewAggregateParam(opcode.AggregateSum, 2, ""), - engine.NewAggregateParam(opcode.AggregateSum, 3, ""), + engine.NewAggregateParam(opcode.AggregateSum, 2, "", collations.MySQL8()), + engine.NewAggregateParam(opcode.AggregateSum, 3, "", collations.MySQL8()), }, }, }, { - // date conversion on import. + // Date conversion on import. 
input: &binlogdatapb.Rule{ Match: "datze", }, @@ -454,8 +508,8 @@ func TestBuildPlanSuccess(t *testing.T) { table: testSchema.TableDefinitions[tableDefMap["datze"]], sourceQuery: "select id, dt from datze order by id asc", targetQuery: "select id, convert_tz(dt, 'UTC', 'US/Pacific') as dt from datze order by id asc", - compareCols: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "id"}, {1, collations.Local().LookupByName(sqltypes.NULL.String()), false, "dt"}}, - comparePKs: []compareColInfo{{0, collations.Local().LookupByName(sqltypes.NULL.String()), true, "id"}}, + compareCols: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "id"}, {1, collations.MySQL8().LookupByName(sqltypes.NULL.String()), false, "dt"}}, + comparePKs: []compareColInfo{{0, collations.MySQL8().LookupByName(sqltypes.NULL.String()), true, "id"}}, pkCols: []int{0}, selectPks: []int{0}, orderBy: sqlparser.OrderBy{&sqlparser.Order{ @@ -478,34 +532,49 @@ func TestBuildPlanSuccess(t *testing.T) { dbc := binlogplayer.NewMockDBClient(t) filter := &binlogdatapb.Filter{Rules: []*binlogdatapb.Rule{tcase.input}} vdiffenv.opts.CoreOptions.Tables = tcase.table - wd, err := newWorkflowDiffer(ct, vdiffenv.opts) + wd, err := newWorkflowDiffer(ct, vdiffenv.opts, collations.MySQL8()) require.NoError(t, err) dbc.ExpectRequestRE("select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report", noResults, nil) - columnList := make([]string, len(tcase.tablePlan.comparePKs)) - collationList := make([]string, len(tcase.tablePlan.comparePKs)) - env := collations.Local() - for i := range tcase.tablePlan.comparePKs { - columnList[i] = tcase.tablePlan.comparePKs[i].colName - if tcase.tablePlan.comparePKs[i].collation != collations.Unknown { - collationList[i] = env.LookupName(tcase.tablePlan.comparePKs[i].collation) - } else { - collationList[i] = sqltypes.NULL.String() + if len(tcase.tablePlan.table.PrimaryKeyColumns) == 0 { + result := 
noResults + if tcase.table == "nopkwithpke" { // This has a PKE column: c3 + result = sqltypes.MakeTestResult( + sqltypes.MakeTestFields( + "column_name|index_name", + "varchar|varchar", + ), + "c3|c3", + ) } + dbc.ExpectRequestRE("SELECT index_cols.COLUMN_NAME AS column_name, index_cols.INDEX_NAME as index_name FROM information_schema.STATISTICS", result, nil) + } + if len(tcase.tablePlan.comparePKs) > 0 { + columnList := make([]string, len(tcase.tablePlan.comparePKs)) + collationList := make([]string, len(tcase.tablePlan.comparePKs)) + env := collations.MySQL8() + for i := range tcase.tablePlan.comparePKs { + columnList[i] = tcase.tablePlan.comparePKs[i].colName + if tcase.tablePlan.comparePKs[i].collation != collations.Unknown { + collationList[i] = env.LookupName(tcase.tablePlan.comparePKs[i].collation) + } else { + collationList[i] = sqltypes.NULL.String() + } + } + columnBV, err := sqltypes.BuildBindVariable(columnList) + require.NoError(t, err) + query, err := sqlparser.ParseAndBind(sqlSelectColumnCollations, + sqltypes.StringBindVariable(vdiffDBName), + sqltypes.StringBindVariable(tcase.tablePlan.table.Name), + columnBV, + ) + require.NoError(t, err) + dbc.ExpectRequest(query, sqltypes.MakeTestResult(sqltypes.MakeTestFields( + "collation_name", + "varchar", + ), + collationList..., + ), nil) } - columnBV, err := sqltypes.BuildBindVariable(columnList) - require.NoError(t, err) - query, err := sqlparser.ParseAndBind(sqlSelectColumnCollations, - sqltypes.StringBindVariable(vdiffDBName), - sqltypes.StringBindVariable(tcase.tablePlan.table.Name), - columnBV, - ) - require.NoError(t, err) - dbc.ExpectRequest(query, sqltypes.MakeTestResult(sqltypes.MakeTestFields( - "collation_name", - "varchar", - ), - collationList..., - ), nil) err = wd.buildPlan(dbc, filter, testSchema) require.NoError(t, err, tcase.input) require.Equal(t, 1, len(wd.tableDiffers), tcase.input) @@ -577,7 +646,7 @@ func TestBuildPlanInclude(t *testing.T) { for _, tcase := range testcases { dbc 
:= binlogplayer.NewMockDBClient(t) vdiffenv.opts.CoreOptions.Tables = strings.Join(tcase.tables, ",") - wd, err := newWorkflowDiffer(ct, vdiffenv.opts) + wd, err := newWorkflowDiffer(ct, vdiffenv.opts, collations.MySQL8()) require.NoError(t, err) for _, table := range tcase.tables { query := fmt.Sprintf(`select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report @@ -650,7 +719,7 @@ func TestBuildPlanFailure(t *testing.T) { dbc := binlogplayer.NewMockDBClient(t) filter := &binlogdatapb.Filter{Rules: []*binlogdatapb.Rule{tcase.input}} vdiffenv.opts.CoreOptions.Tables = tcase.input.Match - wd, err := newWorkflowDiffer(ct, vdiffenv.opts) + wd, err := newWorkflowDiffer(ct, vdiffenv.opts, collations.MySQL8()) require.NoError(t, err) dbc.ExpectRequestRE("select vdt.lastpk as lastpk, vdt.mismatch as mismatch, vdt.report as report", noResults, nil) err = wd.buildPlan(dbc, filter, testSchema) diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go index b168625d20a..4e8e827145a 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan.go @@ -51,8 +51,8 @@ const ( ) // buildControllerPlan parses the input query and returns an appropriate plan. 
-func buildControllerPlan(query string) (*controllerPlan, error) { - stmt, err := sqlparser.Parse(query) +func buildControllerPlan(query string, parser *sqlparser.Parser) (*controllerPlan, error) { + stmt, err := parser.Parse(query) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go index 391b8d9c67e..275fb7fc455 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller_plan_test.go @@ -21,6 +21,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" ) type testControllerPlan struct { @@ -240,7 +242,7 @@ func TestControllerPlan(t *testing.T) { }} for _, tcase := range tcases { t.Run(tcase.in, func(t *testing.T) { - pl, err := buildControllerPlan(tcase.in) + pl, err := buildControllerPlan(tcase.in, sqlparser.NewTestParser()) if tcase.err != "" { require.EqualError(t, err, tcase.err) return diff --git a/go/vt/vttablet/tabletmanager/vreplication/engine.go b/go/vt/vttablet/tabletmanager/vreplication/engine.go index f230ecce045..1f8f2236e6a 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/engine.go +++ b/go/vt/vttablet/tabletmanager/vreplication/engine.go @@ -28,9 +28,8 @@ import ( "time" "vitess.io/vitess/go/constants/sidecar" - + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconfigs" @@ -39,6 +38,7 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -111,6 +111,9 @@ type Engine struct { // enabled in 
NewSimpleTestEngine. This should NOT be used in // production. shortcircuit bool + + collationEnv *collations.Environment + parser *sqlparser.Parser } type journalEvent struct { @@ -127,15 +130,17 @@ type PostCopyAction struct { // NewEngine creates a new Engine. // A nil ts means that the Engine is disabled. -func NewEngine(config *tabletenv.TabletConfig, ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon, lagThrottler *throttle.Throttler) *Engine { +func NewEngine(config *tabletenv.TabletConfig, ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon, lagThrottler *throttle.Throttler, collationEnv *collations.Environment, parser *sqlparser.Parser) *Engine { vre := &Engine{ controllers: make(map[int32]*controller), ts: ts, cell: cell, mysqld: mysqld, journaler: make(map[string]*journalEvent), - ec: newExternalConnector(config.ExternalConnections), + ec: newExternalConnector(config.ExternalConnections, collationEnv, parser), throttlerClient: throttle.NewBackgroundClient(lagThrottler, throttlerapp.VReplicationName, throttle.ThrottleCheckPrimaryWrite), + collationEnv: collationEnv, + parser: parser, } return vre @@ -148,10 +153,10 @@ func (vre *Engine) InitDBConfig(dbcfgs *dbconfigs.DBConfigs) { return } vre.dbClientFactoryFiltered = func() binlogplayer.DBClient { - return binlogplayer.NewDBClient(dbcfgs.FilteredWithDB()) + return binlogplayer.NewDBClient(dbcfgs.FilteredWithDB(), vre.parser) } vre.dbClientFactoryDba = func() binlogplayer.DBClient { - return binlogplayer.NewDBClient(dbcfgs.DbaWithDB()) + return binlogplayer.NewDBClient(dbcfgs.DbaWithDB(), vre.parser) } vre.dbName = dbcfgs.DBName } @@ -167,7 +172,9 @@ func NewTestEngine(ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaemon, db dbClientFactoryDba: dbClientFactoryDba, dbName: dbname, journaler: make(map[string]*journalEvent), - ec: newExternalConnector(externalConfig), + ec: newExternalConnector(externalConfig, collations.MySQL8(), sqlparser.NewTestParser()), + collationEnv: 
collations.MySQL8(), + parser: sqlparser.NewTestParser(), } return vre } @@ -184,8 +191,10 @@ func NewSimpleTestEngine(ts *topo.Server, cell string, mysqld mysqlctl.MysqlDaem dbClientFactoryDba: dbClientFactoryDba, dbName: dbname, journaler: make(map[string]*journalEvent), - ec: newExternalConnector(externalConfig), + ec: newExternalConnector(externalConfig, collations.MySQL8(), sqlparser.NewTestParser()), shortcircuit: true, + collationEnv: collations.MySQL8(), + parser: sqlparser.NewTestParser(), } return vre } @@ -362,7 +371,7 @@ func (vre *Engine) exec(query string, runAsAdmin bool) (*sqltypes.Result, error) } defer vre.updateStats() - plan, err := buildControllerPlan(query) + plan, err := buildControllerPlan(query, vre.parser) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/external_connector.go b/go/vt/vttablet/tabletmanager/vreplication/external_connector.go index 1c20e2054be..a3974f70b90 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/external_connector.go +++ b/go/vt/vttablet/tabletmanager/vreplication/external_connector.go @@ -17,10 +17,10 @@ limitations under the License. 
package vreplication import ( - "sync" - "context" + "sync" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/grpcclient" @@ -28,6 +28,7 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/tabletconn" @@ -58,15 +59,19 @@ type VStreamerClient interface { } type externalConnector struct { - mu sync.Mutex - dbconfigs map[string]*dbconfigs.DBConfigs - connectors map[string]*mysqlConnector + mu sync.Mutex + dbconfigs map[string]*dbconfigs.DBConfigs + connectors map[string]*mysqlConnector + collationEnv *collations.Environment + parser *sqlparser.Parser } -func newExternalConnector(dbcfgs map[string]*dbconfigs.DBConfigs) *externalConnector { +func newExternalConnector(dbcfgs map[string]*dbconfigs.DBConfigs, collationEnv *collations.Environment, parser *sqlparser.Parser) *externalConnector { return &externalConnector{ - dbconfigs: dbcfgs, - connectors: make(map[string]*mysqlConnector), + dbconfigs: dbcfgs, + connectors: make(map[string]*mysqlConnector), + collationEnv: collationEnv, + parser: parser, } } @@ -91,7 +96,7 @@ func (ec *externalConnector) Get(name string) (*mysqlConnector, error) { return nil, vterrors.Errorf(vtrpcpb.Code_NOT_FOUND, "external mysqlConnector %v not found", name) } c := &mysqlConnector{} - c.env = tabletenv.NewEnv(config, name) + c.env = tabletenv.NewEnv(config, name, ec.collationEnv, ec.parser) c.se = schema.NewEngine(c.env) c.vstreamer = vstreamer.NewEngine(c.env, nil, c.se, nil, "") c.vstreamer.InitDBConfig("", "") diff --git a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go index 64a924f28d3..6f0a9a5c6b5 100644 --- 
a/go/vt/vttablet/tabletmanager/vreplication/framework_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/framework_test.go @@ -32,6 +32,8 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/sqlparser" + _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/replication" @@ -44,7 +46,6 @@ import ( "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" "vitess.io/vitess/go/vt/sidecardb" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/queryservice" @@ -495,7 +496,7 @@ func (dbc *realDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resu } func (dc *realDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { - queries, err := sqlparser.SplitStatementToPieces(query) + queries, err := sqlparser.NewTestParser().SplitStatementToPieces(query) if err != nil { return nil, err } @@ -567,6 +568,9 @@ func shouldIgnoreQuery(query string) bool { ", component_throttled=", // update of last throttle time, can happen out-of-band, so can't test for it "context cancel", "SELECT rows_copied FROM _vt.vreplication WHERE id=", + // This is only executed if the table has no defined Primary Key, which we don't know in the lower level + // code. 
+ "SELECT index_cols.COLUMN_NAME AS column_name, index_cols.INDEX_NAME as index_name FROM information_schema.STATISTICS", } if sidecardb.MatchesInitQuery(query) { return true diff --git a/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go b/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go index 9c6f427b418..a1b38eb07ae 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replica_connector.go @@ -18,17 +18,15 @@ package vreplication import ( "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" "context" - "vitess.io/vitess/go/sqltypes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" - querypb "vitess.io/vitess/go/vt/proto/query" - vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" - "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer" ) @@ -38,7 +36,7 @@ import ( // This is used by binlog server to make vstream connection // using the vstream connection, it will parse the events from binglog // to fetch the corresponding GTID for required recovery time -func NewReplicaConnector(connParams *mysql.ConnParams) *ReplicaConnector { +func NewReplicaConnector(connParams *mysql.ConnParams, collationEnv *collations.Environment, parser *sqlparser.Parser) *ReplicaConnector { // Construct config := tabletenv.NewDefaultConfig() @@ -49,7 +47,7 @@ func NewReplicaConnector(connParams *mysql.ConnParams) *ReplicaConnector { dbCfg.SetDbParams(*connParams, *connParams, *connParams) config.DB = dbCfg c := &ReplicaConnector{conn: connParams} - env := tabletenv.NewEnv(config, "source") + env := tabletenv.NewEnv(config, "source", collationEnv, parser) c.se = schema.NewEngine(env) 
c.se.SkipMetaCheck = true c.vstreamer = vstreamer.NewEngine(env, nil, c.se, nil, "") @@ -70,33 +68,12 @@ type ReplicaConnector struct { vstreamer *vstreamer.Engine } -func (c *ReplicaConnector) shutdown() { +func (c *ReplicaConnector) Close() error { c.vstreamer.Close() c.se.Close() -} - -func (c *ReplicaConnector) Open(ctx context.Context) error { - return nil -} - -func (c *ReplicaConnector) Close(ctx context.Context) error { - c.shutdown() return nil } func (c *ReplicaConnector) VStream(ctx context.Context, startPos string, filter *binlogdatapb.Filter, send func([]*binlogdatapb.VEvent) error) error { return c.vstreamer.Stream(ctx, startPos, nil, filter, throttlerapp.ReplicaConnectorName, send) } - -// VStreamRows streams rows from query result -func (c *ReplicaConnector) VStreamRows(ctx context.Context, query string, lastpk *querypb.QueryResult, send func(*binlogdatapb.VStreamRowsResponse) error) error { - var row []sqltypes.Value - if lastpk != nil { - r := sqltypes.Proto3ToResult(lastpk) - if len(r.Rows) != 1 { - return vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "unexpected lastpk input: %v", lastpk) - } - row = r.Rows[0] - } - return c.vstreamer.StreamRows(ctx, query, row, send) -} diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go index 9ecf8669d6d..a328249d0e0 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan.go @@ -59,6 +59,7 @@ type ReplicatorPlan struct { ColInfoMap map[string][]*ColumnInfo stats *binlogplayer.Stats Source *binlogdatapb.BinlogSource + collationEnv *collations.Environment } // buildExecution plan uses the field info as input and the partially built @@ -98,11 +99,12 @@ func (rp *ReplicatorPlan) buildExecutionPlan(fieldEvent *binlogdatapb.FieldEvent // requires us to wait for the field info sent by the source. 
func (rp *ReplicatorPlan) buildFromFields(tableName string, lastpk *sqltypes.Result, fields []*querypb.Field) (*TablePlan, error) { tpb := &tablePlanBuilder{ - name: sqlparser.NewIdentifierCS(tableName), - lastpk: lastpk, - colInfos: rp.ColInfoMap[tableName], - stats: rp.stats, - source: rp.Source, + name: sqlparser.NewIdentifierCS(tableName), + lastpk: lastpk, + colInfos: rp.ColInfoMap[tableName], + stats: rp.stats, + source: rp.Source, + collationEnv: rp.collationEnv, } for _, field := range fields { colName := sqlparser.NewIdentifierCI(field.Name) @@ -217,6 +219,8 @@ type TablePlan struct { PartialInserts map[string]*sqlparser.ParsedQuery // PartialUpdates are same as PartialInserts, but for update statements PartialUpdates map[string]*sqlparser.ParsedQuery + + CollationEnv *collations.Environment } // MarshalJSON performs a custom JSON Marshalling. @@ -254,7 +258,7 @@ func (tp *TablePlan) applyBulkInsert(sqlbuffer *bytes2.Buffer, rows []*querypb.R if i > 0 { sqlbuffer.WriteString(", ") } - if err := tp.BulkInsertValues.AppendFromRow(sqlbuffer, tp.Fields, row, tp.FieldsToSkip); err != nil { + if err := appendFromRow(tp.BulkInsertValues, sqlbuffer, tp.Fields, row, tp.FieldsToSkip); err != nil { return nil, err } } @@ -299,7 +303,7 @@ func (tp *TablePlan) isOutsidePKRange(bindvars map[string]*querypb.BindVariable, rowVal, _ := sqltypes.BindVariableToValue(bindvar) // TODO(king-11) make collation aware - result, err := evalengine.NullsafeCompare(rowVal, tp.Lastpk.Rows[0][0], collations.Unknown) + result, err := evalengine.NullsafeCompare(rowVal, tp.Lastpk.Rows[0][0], tp.CollationEnv, collations.Unknown) // If rowVal is > last pk, transaction will be a noop, so don't apply this statement if err == nil && result > 0 { tp.Stats.NoopQueryCount.Add(stmtType, 1) @@ -317,7 +321,7 @@ func (tp *TablePlan) isOutsidePKRange(bindvars map[string]*querypb.BindVariable, func (tp *TablePlan) bindFieldVal(field *querypb.Field, val *sqltypes.Value) (*querypb.BindVariable, error) { 
if conversion, ok := tp.ConvertCharset[field.Name]; ok && !val.IsNull() { // Non-null string value, for which we have a charset conversion instruction - fromCollation := collations.Local().DefaultCollationForCharset(conversion.FromCharset) + fromCollation := tp.CollationEnv.DefaultCollationForCharset(conversion.FromCharset) if fromCollation == collations.Unknown { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "Character set %s not supported for column %s", conversion.FromCharset, field.Name) } @@ -603,3 +607,74 @@ func valsEqual(v1, v2 sqltypes.Value) bool { // Compare content only if none are null. return v1.ToString() == v2.ToString() } + +// AppendFromRow behaves like Append but takes a querypb.Row directly, assuming that +// the fields in the row are in the same order as the placeholders in this query. The fields might include generated +// columns which are dropped, by checking against skipFields, before binding the variables +// note: there can be more fields than bind locations since extra columns might be requested from the source if not all +// primary keys columns are present in the target table, for example. 
Also some values in the row may not correspond for +// values from the database on the source: sum/count for aggregation queries, for example +func appendFromRow(pq *sqlparser.ParsedQuery, buf *bytes2.Buffer, fields []*querypb.Field, row *querypb.Row, skipFields map[string]bool) error { + bindLocations := pq.BindLocations() + if len(fields) < len(bindLocations) { + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "wrong number of fields: got %d fields for %d bind locations ", + len(fields), len(bindLocations)) + } + + type colInfo struct { + typ querypb.Type + length int64 + offset int64 + } + rowInfo := make([]*colInfo, 0) + + offset := int64(0) + for i, field := range fields { // collect info required for fields to be bound + length := row.Lengths[i] + if !skipFields[strings.ToLower(field.Name)] { + rowInfo = append(rowInfo, &colInfo{ + typ: field.Type, + length: length, + offset: offset, + }) + } + if length > 0 { + offset += row.Lengths[i] + } + } + + // bind field values to locations + var offsetQuery int + for i, loc := range bindLocations { + col := rowInfo[i] + buf.WriteString(pq.Query[offsetQuery:loc.Offset]) + typ := col.typ + + switch typ { + case querypb.Type_TUPLE: + return vterrors.Errorf(vtrpcpb.Code_INTERNAL, "unexpected Type_TUPLE for value %d", i) + case querypb.Type_JSON: + if col.length < 0 { // An SQL NULL and not an actual JSON value + buf.WriteString(sqltypes.NullStr) + } else { // A JSON value (which may be a JSON null literal value) + buf2 := row.Values[col.offset : col.offset+col.length] + vv, err := vjson.MarshalSQLValue(buf2) + if err != nil { + return err + } + buf.WriteString(vv.RawStr()) + } + default: + if col.length < 0 { + // -1 means a null variable; serialize it directly + buf.WriteString(sqltypes.NullStr) + } else { + vv := sqltypes.MakeTrusted(typ, row.Values[col.offset:col.offset+col.length]) + vv.EncodeSQLBytes2(buf) + } + } + offsetQuery = loc.Offset + loc.Length + } + buf.WriteString(pq.Query[offsetQuery:]) + return nil +} 
diff --git a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go index 780b1c0d064..5dce71cf0f5 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/replicator_plan_test.go @@ -21,7 +21,9 @@ import ( "strings" "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/assert" @@ -239,7 +241,7 @@ func TestBuildPlayerPlan(t *testing.T) { PKReferences: []string{"c1"}, InsertFront: "insert into t1(c1,c2,c3)", InsertValues: "(:a_c1,:a_c2,:a_c3)", - InsertOnDup: "on duplicate key update c2=values(c2)", + InsertOnDup: " on duplicate key update c2=values(c2)", Insert: "insert into t1(c1,c2,c3) values (:a_c1,:a_c2,:a_c3) on duplicate key update c2=values(c2)", Update: "update t1 set c2=:a_c2 where c1=:b_c1", Delete: "update t1 set c2=null where c1=:b_c1", @@ -261,7 +263,7 @@ func TestBuildPlayerPlan(t *testing.T) { PKReferences: []string{"c1", "pk1", "pk2"}, InsertFront: "insert into t1(c1,c2,c3)", InsertValues: "(:a_c1,:a_c2,:a_c3)", - InsertOnDup: "on duplicate key update c2=values(c2)", + InsertOnDup: " on duplicate key update c2=values(c2)", Insert: "insert into t1(c1,c2,c3) select :a_c1, :a_c2, :a_c3 from dual where (:a_pk1,:a_pk2) <= (1,'aaa') on duplicate key update c2=values(c2)", Update: "update t1 set c2=:a_c2 where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')", Delete: "update t1 set c2=null where c1=:b_c1 and (:b_pk1,:b_pk2) <= (1,'aaa')", @@ -733,7 +735,7 @@ func TestBuildPlayerPlan(t *testing.T) { } for _, tcase := range testcases { - plan, err := buildReplicatorPlan(getSource(tcase.input), PrimaryKeyInfos, nil, binlogplayer.NewStats()) + plan, err := buildReplicatorPlan(getSource(tcase.input), PrimaryKeyInfos, nil, binlogplayer.NewStats(), collations.MySQL8(), sqlparser.NewTestParser()) gotPlan, _ := 
json.Marshal(plan) wantPlan, _ := json.Marshal(tcase.plan) if string(gotPlan) != string(wantPlan) { @@ -747,7 +749,7 @@ func TestBuildPlayerPlan(t *testing.T) { t.Errorf("Filter err(%v): %s, want %v", tcase.input, gotErr, tcase.err) } - plan, err = buildReplicatorPlan(getSource(tcase.input), PrimaryKeyInfos, copyState, binlogplayer.NewStats()) + plan, err = buildReplicatorPlan(getSource(tcase.input), PrimaryKeyInfos, copyState, binlogplayer.NewStats(), collations.MySQL8(), sqlparser.NewTestParser()) if err != nil { continue } @@ -777,7 +779,7 @@ func TestBuildPlayerPlanNoDup(t *testing.T) { Filter: "select * from t", }}, } - _, err := buildReplicatorPlan(getSource(input), PrimaryKeyInfos, nil, binlogplayer.NewStats()) + _, err := buildReplicatorPlan(getSource(input), PrimaryKeyInfos, nil, binlogplayer.NewStats(), collations.MySQL8(), sqlparser.NewTestParser()) want := "more than one target for source table t" if err == nil || !strings.Contains(err.Error(), want) { t.Errorf("buildReplicatorPlan err: %v, must contain: %v", err, want) @@ -798,7 +800,7 @@ func TestBuildPlayerPlanExclude(t *testing.T) { Filter: "", }}, } - plan, err := buildReplicatorPlan(getSource(input), PrimaryKeyInfos, nil, binlogplayer.NewStats()) + plan, err := buildReplicatorPlan(getSource(input), PrimaryKeyInfos, nil, binlogplayer.NewStats(), collations.MySQL8(), sqlparser.NewTestParser()) assert.NoError(t, err) want := &TestReplicatorPlan{ diff --git a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go index 715d87186a6..0f94b6b13d2 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go +++ b/go/vt/vttablet/tabletmanager/vreplication/table_plan_builder.go @@ -22,6 +22,7 @@ import ( "sort" "strings" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/vt/binlog/binlogplayer" @@ -60,6 +61,8 @@ type tablePlanBuilder struct { 
stats *binlogplayer.Stats source *binlogdatapb.BinlogSource pkIndices []bool + + collationEnv *collations.Environment } // colExpr describes the processing to be performed to @@ -129,7 +132,7 @@ const ( // The TablePlan built is a partial plan. The full plan for a table is built // when we receive field information from events or rows sent by the source. // buildExecutionPlan is the function that builds the full plan. -func buildReplicatorPlan(source *binlogdatapb.BinlogSource, colInfoMap map[string][]*ColumnInfo, copyState map[string]*sqltypes.Result, stats *binlogplayer.Stats) (*ReplicatorPlan, error) { +func buildReplicatorPlan(source *binlogdatapb.BinlogSource, colInfoMap map[string][]*ColumnInfo, copyState map[string]*sqltypes.Result, stats *binlogplayer.Stats, collationEnv *collations.Environment, parser *sqlparser.Parser) (*ReplicatorPlan, error) { filter := source.Filter plan := &ReplicatorPlan{ VStreamFilter: &binlogdatapb.Filter{FieldEventMode: filter.FieldEventMode}, @@ -138,6 +141,7 @@ func buildReplicatorPlan(source *binlogdatapb.BinlogSource, colInfoMap map[strin ColInfoMap: colInfoMap, stats: stats, Source: source, + collationEnv: collationEnv, } for tableName := range colInfoMap { lastpk, ok := copyState[tableName] @@ -156,7 +160,7 @@ func buildReplicatorPlan(source *binlogdatapb.BinlogSource, colInfoMap map[strin if !ok { return nil, fmt.Errorf("table %s not found in schema", tableName) } - tablePlan, err := buildTablePlan(tableName, rule, colInfos, lastpk, stats, source) + tablePlan, err := buildTablePlan(tableName, rule, colInfos, lastpk, stats, source, collationEnv, parser) if err != nil { return nil, err } @@ -196,7 +200,7 @@ func MatchTable(tableName string, filter *binlogdatapb.Filter) (*binlogdatapb.Ru } func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*ColumnInfo, lastpk *sqltypes.Result, - stats *binlogplayer.Stats, source *binlogdatapb.BinlogSource) (*TablePlan, error) { + stats *binlogplayer.Stats, source 
*binlogdatapb.BinlogSource, collationEnv *collations.Environment, parser *sqlparser.Parser) (*TablePlan, error) { filter := rule.Filter query := filter @@ -213,7 +217,7 @@ func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*Colum case filter == ExcludeStr: return nil, nil } - sel, fromTable, err := analyzeSelectFrom(query) + sel, fromTable, err := analyzeSelectFrom(query, parser) if err != nil { return nil, err } @@ -245,6 +249,7 @@ func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*Colum EnumValuesMap: enumValuesMap, ConvertCharset: rule.ConvertCharset, ConvertIntToEnum: rule.ConvertIntToEnum, + CollationEnv: collationEnv, } return tablePlan, nil @@ -256,10 +261,11 @@ func buildTablePlan(tableName string, rule *binlogdatapb.Rule, colInfos []*Colum From: sel.From, Where: sel.Where, }, - lastpk: lastpk, - colInfos: colInfos, - stats: stats, - source: source, + lastpk: lastpk, + colInfos: colInfos, + stats: stats, + source: source, + collationEnv: collationEnv, } if err := tpb.analyzeExprs(sel.SelectExprs); err != nil { @@ -371,11 +377,12 @@ func (tpb *tablePlanBuilder) generate() *TablePlan { TablePlanBuilder: tpb, PartialInserts: make(map[string]*sqlparser.ParsedQuery, 0), PartialUpdates: make(map[string]*sqlparser.ParsedQuery, 0), + CollationEnv: tpb.collationEnv, } } -func analyzeSelectFrom(query string) (sel *sqlparser.Select, from string, err error) { - statement, err := sqlparser.Parse(query) +func analyzeSelectFrom(query string, parser *sqlparser.Parser) (sel *sqlparser.Select, from string, err error) { + statement, err := parser.Parse(query) if err != nil { return nil, "", err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go index 196ee6aac86..3f4a5f2710e 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go @@ -219,7 +219,7 @@ func newVCopierCopyWorker( func (vc 
*vcopier) initTablesForCopy(ctx context.Context) error { defer vc.vr.dbClient.Rollback() - plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats) + plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.collationEnv, vc.vr.vre.parser) if err != nil { return err } @@ -385,7 +385,7 @@ func (vc *vcopier) copyTable(ctx context.Context, tableName string, copyState ma log.Infof("Copying table %s, lastpk: %v", tableName, copyState[tableName]) - plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats) + plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.collationEnv, vc.vr.vre.parser) if err != nil { return err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go index fe92f284ce8..d0adc970382 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier_atomic.go @@ -54,7 +54,7 @@ func newCopyAllState(vc *vcopier) (*copyAllState, error) { state := ©AllState{ vc: vc, } - plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats) + plan, err := buildReplicatorPlan(vc.vr.source, vc.vr.colInfoMap, nil, vc.vr.stats, vc.vr.vre.collationEnv, vc.vr.vre.parser) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go index c222bc11781..f1265a1dd68 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -180,7 +180,7 @@ func (vp *vplayer) play(ctx context.Context) error { return nil } - plan, err := buildReplicatorPlan(vp.vr.source, vp.vr.colInfoMap, vp.copyState, vp.vr.stats) + plan, err := buildReplicatorPlan(vp.vr.source, vp.vr.colInfoMap, vp.copyState, vp.vr.stats, vp.vr.vre.collationEnv, vp.vr.vre.parser) 
if err != nil { vp.vr.stats.ErrorCounts.Add([]string{"Plan"}, 1) return err diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go index 9c065866c15..575b398c3df 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -350,7 +350,11 @@ func (vr *vreplicator) buildColInfoMap(ctx context.Context) (map[string][]*Colum pks = td.PrimaryKeyColumns } else { // Use a PK equivalent if one exists. - if pks, _, err = vr.mysqld.GetPrimaryKeyEquivalentColumns(ctx, vr.dbClient.DBName(), td.Name); err != nil { + executeFetch := func(query string, maxrows int, wantfields bool) (*sqltypes.Result, error) { + // This sets wantfields to true. + return vr.dbClient.ExecuteFetch(query, maxrows) + } + if pks, _, err = mysqlctl.GetPrimaryKeyEquivalentColumns(ctx, executeFetch, vr.dbClient.DBName(), td.Name); err != nil { return nil, err } // Fall back to using every column in the table if there's no PK or PKE. @@ -726,7 +730,7 @@ func (vr *vreplicator) getTableSecondaryKeys(ctx context.Context, tableName stri } tableSchema := schema.TableDefinitions[0].Schema var secondaryKeys []*sqlparser.IndexDefinition - parsedDDL, err := sqlparser.ParseStrictDDL(tableSchema) + parsedDDL, err := vr.vre.parser.ParseStrictDDL(tableSchema) if err != nil { return secondaryKeys, err } @@ -973,7 +977,7 @@ func (vr *vreplicator) execPostCopyActions(ctx context.Context, tableName string // the table schema and if so move forward and delete the // post_copy_action record. 
if sqlErr, ok := err.(*sqlerror.SQLError); ok && sqlErr.Number() == sqlerror.ERDupKeyName { - stmt, err := sqlparser.ParseStrictDDL(action.Task) + stmt, err := vr.vre.parser.ParseStrictDDL(action.Task) if err != nil { return failedAlterErr } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go index dd4b9dc70f8..c38402dfd22 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go @@ -35,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/schemadiff" + "vitess.io/vitess/go/vt/sqlparser" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" @@ -183,7 +184,10 @@ func TestPrimaryKeyEquivalentColumns(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require.NoError(t, env.Mysqld.ExecuteSuperQuery(ctx, tt.ddl)) - cols, indexName, err := env.Mysqld.GetPrimaryKeyEquivalentColumns(ctx, env.Dbcfgs.DBName, tt.table) + conn, err := env.Mysqld.GetDbaConnection(ctx) + require.NoError(t, err, "could not connect to mysqld: %v", err) + defer conn.Close() + cols, indexName, err := mysqlctl.GetPrimaryKeyEquivalentColumns(ctx, conn.ExecuteFetch, env.Dbcfgs.DBName, tt.table) if (err != nil) != tt.wantErr { t.Errorf("Mysqld.GetPrimaryKeyEquivalentColumns() error = %v, wantErr %v", err, tt.wantErr) return @@ -549,7 +553,7 @@ func TestDeferSecondaryKeys(t *testing.T) { // order in the table schema. 
if !tcase.expectFinalSchemaDiff { currentDDL := getCurrentDDL(tcase.tableName) - sdiff, err := schemadiff.DiffCreateTablesQueries(currentDDL, tcase.initialDDL, diffHints) + sdiff, err := schemadiff.DiffCreateTablesQueries(currentDDL, tcase.initialDDL, diffHints, sqlparser.NewTestParser()) require.NoError(t, err) require.Nil(t, sdiff, "Expected no schema difference but got: %s", sdiff.CanonicalStatementString()) } diff --git a/go/vt/vttablet/tabletserver/connpool/dbconn.go b/go/vt/vttablet/tabletserver/connpool/dbconn.go index 63f4c73520e..7876cdf00db 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn.go @@ -81,7 +81,7 @@ func newPooledConn(ctx context.Context, pool *Pool, appParams dbconfigs.Connecto } // NewConn creates a new Conn without a pool. -func NewConn(ctx context.Context, params dbconfigs.Connector, dbaPool *dbconnpool.ConnectionPool, setting *smartconnpool.Setting) (*Conn, error) { +func NewConn(ctx context.Context, params dbconfigs.Connector, dbaPool *dbconnpool.ConnectionPool, setting *smartconnpool.Setting, env tabletenv.Env) (*Conn, error) { c, err := dbconnpool.NewDBConnection(ctx, params) if err != nil { return nil, err @@ -90,6 +90,7 @@ func NewConn(ctx context.Context, params dbconfigs.Connector, dbaPool *dbconnpoo conn: c, dbaPool: dbaPool, stats: tabletenv.NewStats(servenv.NewExporter("Temp", "Tablet")), + env: env, } dbconn.current.Store("") if setting == nil { @@ -483,9 +484,9 @@ func (dbc *Conn) CurrentForLogging() string { if dbc.env != nil && dbc.env.Config() != nil && !dbc.env.Config().SanitizeLogMessages { queryToLog = dbc.Current() } else { - queryToLog, _ = sqlparser.RedactSQLQuery(dbc.Current()) + queryToLog, _ = dbc.env.SQLParser().RedactSQLQuery(dbc.Current()) } - return sqlparser.TruncateForLog(queryToLog) + return dbc.env.SQLParser().TruncateForLog(queryToLog) } func (dbc *Conn) applySameSetting(ctx context.Context) (err error) { diff --git 
a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go index 9717c95d9f7..3687ed00c4f 100644 --- a/go/vt/vttablet/tabletserver/connpool/dbconn_test.go +++ b/go/vt/vttablet/tabletserver/connpool/dbconn_test.go @@ -27,12 +27,15 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools/smartconnpool" - - "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" querypb "vitess.io/vitess/go/vt/proto/query" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) func compareTimingCounts(t *testing.T, op string, delta int64, before, after map[string]int64) { @@ -62,11 +65,12 @@ func TestDBConnExec(t *testing.T) { connPool := newPool() mysqlTimings := connPool.env.Stats().MySQLTimings startCounts := mysqlTimings.Counts() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second)) defer cancel() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) if dbConn != nil { defer dbConn.Close() } @@ -135,11 +139,12 @@ func TestDBConnExecLost(t *testing.T) { connPool := newPool() mysqlTimings := connPool.env.Stats().MySQLTimings startCounts := mysqlTimings.Counts() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second)) defer cancel() - dbConn, err := 
newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) if dbConn != nil { defer dbConn.Close() } @@ -193,14 +198,15 @@ func TestDBConnDeadline(t *testing.T) { connPool := newPool() mysqlTimings := connPool.env.Stats().MySQLTimings startCounts := mysqlTimings.Counts() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() db.SetConnDelay(100 * time.Millisecond) ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(50*time.Millisecond)) defer cancel() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) if dbConn != nil { defer dbConn.Close() } @@ -251,9 +257,10 @@ func TestDBConnKill(t *testing.T) { db := fakesqldb.New(t) defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) if dbConn != nil { defer dbConn.Close() } @@ -297,9 +304,10 @@ func TestDBConnClose(t *testing.T) { db := fakesqldb.New(t) defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) require.NoError(t, err) defer dbConn.Close() @@ -322,9 +330,10 @@ func TestDBConnClose(t *testing.T) { func TestDBNoPoolConnKill(t *testing.T) { db := fakesqldb.New(t) connPool := newPool() - 
connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() - dbConn, err := NewConn(context.Background(), db.ConnParams(), connPool.dbaPool, nil) + dbConn, err := NewConn(context.Background(), params, connPool.dbaPool, nil, tabletenv.NewEnv(nil, "TestDBNoPoolConnKill", collations.MySQL8(), sqlparser.NewTestParser())) if dbConn != nil { defer dbConn.Close() } @@ -376,11 +385,12 @@ func TestDBConnStream(t *testing.T) { } db.AddQuery(sql, expectedResult) connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() ctx, cancel := context.WithDeadline(context.Background(), time.Now().Add(10*time.Second)) defer cancel() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) if dbConn != nil { defer dbConn.Close() } @@ -436,9 +446,10 @@ func TestDBConnStreamKill(t *testing.T) { } db.AddQuery(sql, expectedResult) connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) require.NoError(t, err) defer dbConn.Close() @@ -465,10 +476,11 @@ func TestDBConnReconnect(t *testing.T) { defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() - dbConn, err := newPooledConn(context.Background(), connPool, db.ConnParams()) + dbConn, err := newPooledConn(context.Background(), connPool, params) require.NoError(t, err) 
defer dbConn.Close() @@ -490,11 +502,12 @@ func TestDBConnReApplySetting(t *testing.T) { db.OrderMatters() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() ctx := context.Background() - dbConn, err := newPooledConn(ctx, connPool, db.ConnParams()) + dbConn, err := newPooledConn(ctx, connPool, params) require.NoError(t, err) defer dbConn.Close() diff --git a/go/vt/vttablet/tabletserver/connpool/pool.go b/go/vt/vttablet/tabletserver/connpool/pool.go index a8eb2c52d83..567745e37b5 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool.go +++ b/go/vt/vttablet/tabletserver/connpool/pool.go @@ -126,7 +126,7 @@ func (cp *Pool) Get(ctx context.Context, setting *smartconnpool.Setting) (*Poole defer span.Finish() if cp.isCallerIDAppDebug(ctx) { - conn, err := NewConn(ctx, cp.appDebugParams, cp.dbaPool, setting) + conn, err := NewConn(ctx, cp.appDebugParams, cp.dbaPool, setting, cp.env) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletserver/connpool/pool_test.go b/go/vt/vttablet/tabletserver/connpool/pool_test.go index ff43388d12c..f4e6d6fa008 100644 --- a/go/vt/vttablet/tabletserver/connpool/pool_test.go +++ b/go/vt/vttablet/tabletserver/connpool/pool_test.go @@ -24,10 +24,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/pools/smartconnpool" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" + "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -35,7 +38,8 @@ func TestConnPoolGet(t *testing.T) { db := fakesqldb.New(t) defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + 
connPool.Open(params, params, params) defer connPool.Close() dbConn, err := connPool.Get(context.Background(), nil) if err != nil { @@ -56,8 +60,9 @@ func TestConnPoolTimeout(t *testing.T) { } cfg.Timeout = time.Second cfg.IdleTimeout = 10 * time.Second - connPool := NewPool(tabletenv.NewEnv(nil, "PoolTest"), "TestPool", cfg) - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + connPool := NewPool(tabletenv.NewEnv(nil, "PoolTest", collations.MySQL8(), sqlparser.NewTestParser()), "TestPool", cfg) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() dbConn, err := connPool.Get(context.Background(), nil) require.NoError(t, err) @@ -68,10 +73,11 @@ func TestConnPoolTimeout(t *testing.T) { func TestConnPoolGetEmptyDebugConfig(t *testing.T) { db := fakesqldb.New(t) - debugConn := db.ConnParamsWithUname("") + debugConn := dbconfigs.New(db.ConnParamsWithUname("")) defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), debugConn) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, debugConn) im := callerid.NewImmediateCallerID("") ecid := callerid.NewEffectiveCallerID("p", "c", "sc") ctx := context.Background() @@ -89,14 +95,15 @@ func TestConnPoolGetEmptyDebugConfig(t *testing.T) { func TestConnPoolGetAppDebug(t *testing.T) { db := fakesqldb.New(t) - debugConn := db.ConnParamsWithUname("debugUsername") + debugConn := dbconfigs.New(db.ConnParamsWithUname("debugUsername")) ctx := context.Background() im := callerid.NewImmediateCallerID("debugUsername") ecid := callerid.NewEffectiveCallerID("p", "c", "sc") ctx = callerid.NewContext(ctx, ecid, im) defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), debugConn) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, debugConn) defer connPool.Close() dbConn, err := connPool.Get(ctx, nil) if err != nil { @@ -115,7 +122,8 @@ func 
TestConnPoolSetCapacity(t *testing.T) { db := fakesqldb.New(t) defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() assert.Panics(t, func() { @@ -134,7 +142,8 @@ func TestConnPoolStatJSON(t *testing.T) { if connPool.StatsJSON() != "{}" { t.Fatalf("pool is closed, stats json should be empty; was: %q", connPool.StatsJSON()) } - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() statsJSON := connPool.StatsJSON() if statsJSON == "" || statsJSON == "{}" { @@ -153,7 +162,8 @@ func TestConnPoolStateWhilePoolIsOpen(t *testing.T) { defer db.Close() idleTimeout := 10 * time.Second connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() assert.EqualValues(t, 100, connPool.Capacity(), "pool capacity should be 100") assert.EqualValues(t, 0, connPool.Metrics.WaitTime(), "pool wait time should be 0") @@ -179,7 +189,8 @@ func TestConnPoolStateWithSettings(t *testing.T) { defer db.Close() capacity := 5 connPool := newPoolWithCapacity(capacity) - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() assert.EqualValues(t, 5, connPool.Available(), "pool available connections should be 5") assert.EqualValues(t, 0, connPool.Active(), "pool active connections should be 0") @@ -294,7 +305,8 @@ func TestPoolGetConnTime(t *testing.T) { defer db.Close() connPool := newPool() - connPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + connPool.Open(params, params, params) defer connPool.Close() 
connPool.getConnTime.Reset() @@ -325,7 +337,7 @@ func newPool() *Pool { } func newPoolWithCapacity(capacity int) *Pool { - return NewPool(tabletenv.NewEnv(nil, "PoolTest"), "TestPool", tabletenv.ConnPoolConfig{ + return NewPool(tabletenv.NewEnv(nil, "PoolTest", collations.MySQL8(), sqlparser.NewTestParser()), "TestPool", tabletenv.ConnPoolConfig{ Size: capacity, IdleTimeout: 10 * time.Second, }) diff --git a/go/vt/vttablet/tabletserver/exclude_race_test.go b/go/vt/vttablet/tabletserver/exclude_race_test.go index 6e55671ac96..ee4364968c3 100644 --- a/go/vt/vttablet/tabletserver/exclude_race_test.go +++ b/go/vt/vttablet/tabletserver/exclude_race_test.go @@ -10,6 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" @@ -26,7 +27,13 @@ func TestHandlePanicAndSendLogStatsMessageTruncation(t *testing.T) { tl := newTestLogger() defer tl.Close() logStats := tabletenv.NewLogStats(ctx, "TestHandlePanicAndSendLogStatsMessageTruncation") - db, tsv := setupTabletServerTest(t, ctx, "") + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: config.DefaultMySQLVersion, + TruncateErrLen: 32, + }) + require.NoError(t, err) + + db, tsv := setupTabletServerTestCustom(t, ctx, tabletenv.NewDefaultConfig(), "", parser) defer tsv.StopService() defer db.Close() @@ -37,9 +44,6 @@ func TestHandlePanicAndSendLogStatsMessageTruncation(t *testing.T) { "bv3": sqltypes.Int64BindVariable(3333333333), "bv4": sqltypes.Int64BindVariable(4444444444), } - origTruncateErrLen := sqlparser.GetTruncateErrLen() - sqlparser.SetTruncateErrLen(32) - defer sqlparser.SetTruncateErrLen(origTruncateErrLen) defer func() { err := logStats.Error diff --git a/go/vt/vttablet/tabletserver/fuzz.go b/go/vt/vttablet/tabletserver/fuzz.go index fb14455d3f4..c7f3dabde97 100644 --- a/go/vt/vttablet/tabletserver/fuzz.go +++ 
b/go/vt/vttablet/tabletserver/fuzz.go @@ -23,8 +23,10 @@ import ( fuzz "github.com/AdaLogics/go-fuzz-headers" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -57,7 +59,7 @@ func FuzzGetPlan(data []byte) int { // Set up the environment config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) - env := tabletenv.NewEnv(config, "TabletServerTest") + env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) defer qe.Close() diff --git a/go/vt/vttablet/tabletserver/gc/tablegc.go b/go/vt/vttablet/tabletserver/gc/tablegc.go index 928d102624f..17cb058e8da 100644 --- a/go/vt/vttablet/tabletserver/gc/tablegc.go +++ b/go/vt/vttablet/tabletserver/gc/tablegc.go @@ -276,7 +276,6 @@ func (collector *TableGC) operate(ctx context.Context) { // find something new to do. go tableCheckTicker.TickNow() case <-tableCheckTicker.C: - log.Info("TableGC: tableCheckTicker") if err := collector.readAndCheckTables(ctx, dropTablesChan, transitionRequestsChan); err != nil { log.Error(err) } @@ -415,8 +414,6 @@ func (collector *TableGC) readAndCheckTables( // readTables reads the list of _vt_% tables from the database func (collector *TableGC) readTables(ctx context.Context) (gcTables []*gcTable, err error) { - log.Infof("TableGC: read tables") - conn, err := collector.pool.Get(ctx, nil) if err != nil { return nil, err @@ -441,8 +438,6 @@ func (collector *TableGC) readTables(ctx context.Context) (gcTables []*gcTable, // It lists _vt_% tables, then filters through those which are due-date. // It then applies the necessary operation per table. 
func (collector *TableGC) checkTables(ctx context.Context, gcTables []*gcTable, dropTablesChan chan<- *gcTable, transitionRequestsChan chan<- *transitionRequest) error { - log.Infof("TableGC: check tables") - for i := range gcTables { table := gcTables[i] // we capture as local variable as we will later use this in a goroutine shouldTransition, state, uuid, err := collector.shouldTransitionTable(table.tableName) diff --git a/go/vt/vttablet/tabletserver/health_streamer_test.go b/go/vt/vttablet/tabletserver/health_streamer_test.go index 3220bd3ffe7..1a7a1392efb 100644 --- a/go/vt/vttablet/tabletserver/health_streamer_test.go +++ b/go/vt/vttablet/tabletserver/health_streamer_test.go @@ -30,10 +30,11 @@ import ( "google.golang.org/protobuf/proto" "vitess.io/vitess/go/constants/sidecar" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" @@ -45,7 +46,7 @@ func TestHealthStreamerClosed(t *testing.T) { db := fakesqldb.New(t) defer db.Close() config := newConfig(db) - env := tabletenv.NewEnv(config, "ReplTrackerTest") + env := tabletenv.NewEnv(config, "ReplTrackerTest", collations.MySQL8(), sqlparser.NewTestParser()) alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, @@ -72,7 +73,7 @@ func TestNotServingPrimaryNoWrite(t *testing.T) { config := newConfig(db) config.SignalWhenSchemaChange = true - env := tabletenv.NewEnv(config, "TestNotServingPrimary") + env := tabletenv.NewEnv(config, "TestNotServingPrimary", collations.MySQL8(), sqlparser.NewTestParser()) alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, @@ -84,7 +85,7 @@ func TestNotServingPrimaryNoWrite(t *testing.T) { hs.Open() defer hs.Close() target := &querypb.Target{} - hs.InitDBConfig(target, db.ConnParams()) + hs.InitDBConfig(target, 
dbconfigs.New(db.ConnParams())) // Let's say the tablet goes to a non-serving primary state. hs.MakePrimary(false) @@ -103,7 +104,7 @@ func TestHealthStreamerBroadcast(t *testing.T) { config := newConfig(db) config.SignalWhenSchemaChange = false - env := tabletenv.NewEnv(config, "ReplTrackerTest") + env := tabletenv.NewEnv(config, "ReplTrackerTest", collations.MySQL8(), sqlparser.NewTestParser()) alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, @@ -114,7 +115,7 @@ func TestHealthStreamerBroadcast(t *testing.T) { hs.Open() defer hs.Close() target := &querypb.Target{} - hs.InitDBConfig(target, db.ConnParams()) + hs.InitDBConfig(target, dbconfigs.New(db.ConnParams())) ch, cancel := testStream(hs) defer cancel() @@ -218,7 +219,7 @@ func TestReloadSchema(t *testing.T) { config.SignalWhenSchemaChange = testcase.enableSchemaChange config.SchemaReloadInterval = 100 * time.Millisecond - env := tabletenv.NewEnv(config, "ReplTrackerTest") + env := tabletenv.NewEnv(config, "ReplTrackerTest", collations.MySQL8(), sqlparser.NewTestParser()) alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, @@ -336,7 +337,7 @@ func TestReloadView(t *testing.T) { config.SchemaReloadInterval = 100 * time.Millisecond config.EnableViews = true - env := tabletenv.NewEnv(config, "TestReloadView") + env := tabletenv.NewEnv(config, "TestReloadView", collations.MySQL8(), sqlparser.NewTestParser()) alias := &topodatapb.TabletAlias{Cell: "cell", Uid: 1} se := schema.NewEngine(env) hs := newHealthStreamer(env, alias, se) diff --git a/go/vt/vttablet/tabletserver/livequeryz_test.go b/go/vt/vttablet/tabletserver/livequeryz_test.go index 18e62047226..e507f365afb 100644 --- a/go/vt/vttablet/tabletserver/livequeryz_test.go +++ b/go/vt/vttablet/tabletserver/livequeryz_test.go @@ -17,18 +17,19 @@ limitations under the License. 
package tabletserver import ( + "context" "net/http" "net/http/httptest" "testing" - "context" + "vitess.io/vitess/go/vt/sqlparser" ) func TestLiveQueryzHandlerJSON(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/livequeryz/?format=json", nil) - queryList := NewQueryList("test") + queryList := NewQueryList("test", sqlparser.NewTestParser()) queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 1})) queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 2})) @@ -39,7 +40,7 @@ func TestLiveQueryzHandlerHTTP(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/livequeryz/", nil) - queryList := NewQueryList("test") + queryList := NewQueryList("test", sqlparser.NewTestParser()) queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 1})) queryList.Add(NewQueryDetail(context.Background(), &testConn{id: 2})) @@ -50,7 +51,7 @@ func TestLiveQueryzHandlerHTTPFailedInvalidForm(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("POST", "/livequeryz/", nil) - livequeryzHandler([]*QueryList{NewQueryList("test")}, resp, req) + livequeryzHandler([]*QueryList{NewQueryList("test", sqlparser.NewTestParser())}, resp, req) if resp.Code != http.StatusInternalServerError { t.Fatalf("http call should fail and return code: %d, but got: %d", http.StatusInternalServerError, resp.Code) @@ -61,7 +62,7 @@ func TestLiveQueryzHandlerTerminateConn(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/livequeryz//terminate?connID=1", nil) - queryList := NewQueryList("test") + queryList := NewQueryList("test", sqlparser.NewTestParser()) testConn := &testConn{id: 1} queryList.Add(NewQueryDetail(context.Background(), testConn)) if testConn.IsKilled() { @@ -77,7 +78,7 @@ func TestLiveQueryzHandlerTerminateFailedInvalidConnID(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("GET", "/livequeryz//terminate?connID=invalid", nil) - 
livequeryzTerminateHandler([]*QueryList{NewQueryList("test")}, resp, req) + livequeryzTerminateHandler([]*QueryList{NewQueryList("test", sqlparser.NewTestParser())}, resp, req) if resp.Code != http.StatusInternalServerError { t.Fatalf("http call should fail and return code: %d, but got: %d", http.StatusInternalServerError, resp.Code) @@ -88,7 +89,7 @@ func TestLiveQueryzHandlerTerminateFailedInvalidForm(t *testing.T) { resp := httptest.NewRecorder() req, _ := http.NewRequest("POST", "/livequeryz//terminate?inva+lid=2", nil) - livequeryzTerminateHandler([]*QueryList{NewQueryList("test")}, resp, req) + livequeryzTerminateHandler([]*QueryList{NewQueryList("test", sqlparser.NewTestParser())}, resp, req) if resp.Code != http.StatusInternalServerError { t.Fatalf("http call should fail and return code: %d, but got: %d", http.StatusInternalServerError, resp.Code) diff --git a/go/vt/vttablet/tabletserver/messager/engine_test.go b/go/vt/vttablet/tabletserver/messager/engine_test.go index e134a6fbe21..ac817918f48 100644 --- a/go/vt/vttablet/tabletserver/messager/engine_test.go +++ b/go/vt/vttablet/tabletserver/messager/engine_test.go @@ -21,7 +21,7 @@ import ( "reflect" "testing" - "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" @@ -67,9 +67,7 @@ var ( ) func TestEngineSchemaChanged(t *testing.T) { - db := fakesqldb.New(t) - defer db.Close() - engine := newTestEngine(db) + engine := newTestEngine() defer engine.Close() engine.schemaChanged(nil, []*schema.Table{meTableT1, tableT2}, nil, nil) @@ -110,9 +108,7 @@ func extractManagerNames(in map[string]*messageManager) map[string]bool { } func TestSubscribe(t *testing.T) { - db := fakesqldb.New(t) - defer db.Close() - engine := newTestEngine(db) + engine := newTestEngine() engine.schemaChanged(nil, []*schema.Table{meTableT1, meTableT2}, nil, nil) f1, ch1 := newEngineReceiver() f2, ch2 := 
newEngineReceiver() @@ -142,9 +138,7 @@ func TestSubscribe(t *testing.T) { } func TestEngineGenerate(t *testing.T) { - db := fakesqldb.New(t) - defer db.Close() - engine := newTestEngine(db) + engine := newTestEngine() defer engine.Close() engine.schemaChanged(nil, []*schema.Table{meTableT1}, nil, nil) @@ -157,10 +151,10 @@ func TestEngineGenerate(t *testing.T) { } } -func newTestEngine(db *fakesqldb.DB) *Engine { +func newTestEngine() *Engine { config := tabletenv.NewDefaultConfig() tsv := &fakeTabletServer{ - Env: tabletenv.NewEnv(config, "MessagerTest"), + Env: tabletenv.NewEnv(config, "MessagerTest", collations.MySQL8(), sqlparser.NewTestParser()), } se := schema.NewEngine(tsv) te := NewEngine(tsv, se, newFakeVStreamer()) diff --git a/go/vt/vttablet/tabletserver/messager/message_manager_test.go b/go/vt/vttablet/tabletserver/messager/message_manager_test.go index 5c5ab47d3c8..95bd1fb2b01 100644 --- a/go/vt/vttablet/tabletserver/messager/message_manager_test.go +++ b/go/vt/vttablet/tabletserver/messager/message_manager_test.go @@ -31,6 +31,7 @@ import ( "github.com/stretchr/testify/assert" "golang.org/x/sync/semaphore" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/sqlparser" @@ -833,7 +834,7 @@ type fakeTabletServer struct { func newFakeTabletServer() *fakeTabletServer { config := tabletenv.NewDefaultConfig() return &fakeTabletServer{ - Env: tabletenv.NewEnv(config, "MessagerTest"), + Env: tabletenv.NewEnv(config, "MessagerTest", collations.MySQL8(), sqlparser.NewTestParser()), } } diff --git a/go/vt/vttablet/tabletserver/planbuilder/builder.go b/go/vt/vttablet/tabletserver/planbuilder/builder.go index 4594f6350f6..b8e88916e30 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/builder.go +++ b/go/vt/vttablet/tabletserver/planbuilder/builder.go @@ -19,6 +19,7 @@ package planbuilder import ( "strings" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" 
"vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vtgate/evalengine" @@ -27,7 +28,7 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" ) -func analyzeSelect(sel *sqlparser.Select, tables map[string]*schema.Table) (plan *Plan, err error) { +func analyzeSelect(sel *sqlparser.Select, tables map[string]*schema.Table, collationEnv *collations.Environment) (plan *Plan, err error) { plan = &Plan{ PlanID: PlanSelect, FullQuery: GenerateLimitQuery(sel), @@ -48,7 +49,10 @@ func analyzeSelect(sel *sqlparser.Select, tables map[string]*schema.Table) (plan return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "%s is not a sequence", sqlparser.ToString(sel.From)) } plan.PlanID = PlanNextval - v, err := evalengine.Translate(nextVal.Expr, nil) + v, err := evalengine.Translate(nextVal.Expr, &evalengine.Config{ + CollationEnv: collationEnv, + Collation: collationEnv.DefaultConnectionCharset(), + }) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletserver/planbuilder/permission_test.go b/go/vt/vttablet/tabletserver/planbuilder/permission_test.go index 17baa72595e..aac0ed1f64a 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/permission_test.go +++ b/go/vt/vttablet/tabletserver/planbuilder/permission_test.go @@ -178,7 +178,7 @@ func TestBuildPermissions(t *testing.T) { }} for _, tcase := range tcases { - stmt, err := sqlparser.Parse(tcase.input) + stmt, err := sqlparser.NewTestParser().Parse(tcase.input) if err != nil { t.Fatal(err) } diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan.go b/go/vt/vttablet/tabletserver/planbuilder/plan.go index 6f491692241..5d05159b185 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan.go @@ -20,11 +20,11 @@ import ( "encoding/json" "strings" - "vitess.io/vitess/go/vt/vtgate/evalengine" - + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tableacl" "vitess.io/vitess/go/vt/vterrors" + 
"vitess.io/vitess/go/vt/vtgate/evalengine" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -202,7 +202,7 @@ func (plan *Plan) TableNames() (names []string) { } // Build builds a plan based on the schema. -func Build(statement sqlparser.Statement, tables map[string]*schema.Table, dbName string, viewsEnabled bool) (plan *Plan, err error) { +func Build(statement sqlparser.Statement, tables map[string]*schema.Table, dbName string, viewsEnabled bool, collationEnv *collations.Environment) (plan *Plan, err error) { switch stmt := statement.(type) { case *sqlparser.Union: plan, err = &Plan{ @@ -210,7 +210,7 @@ func Build(statement sqlparser.Statement, tables map[string]*schema.Table, dbNam FullQuery: GenerateLimitQuery(stmt), }, nil case *sqlparser.Select: - plan, err = analyzeSelect(stmt, tables) + plan, err = analyzeSelect(stmt, tables, collationEnv) case *sqlparser.Insert: plan, err = analyzeInsert(stmt, tables) case *sqlparser.Update: @@ -323,7 +323,7 @@ func hasLockFunc(sel *sqlparser.Select) bool { } // BuildSettingQuery builds a query for system settings. 
-func BuildSettingQuery(settings []string) (query string, resetQuery string, err error) { +func BuildSettingQuery(settings []string, parser *sqlparser.Parser) (query string, resetQuery string, err error) { if len(settings) == 0 { return "", "", vterrors.Errorf(vtrpcpb.Code_INTERNAL, "[BUG]: plan called for empty system settings") } @@ -331,7 +331,7 @@ func BuildSettingQuery(settings []string) (query string, resetQuery string, err var resetSetExprs sqlparser.SetExprs lDefault := sqlparser.NewStrLiteral("default") for _, setting := range settings { - stmt, err := sqlparser.Parse(setting) + stmt, err := parser.Parse(setting) if err != nil { return "", "", vterrors.Wrapf(err, "[BUG]: failed to parse system setting: %s", setting) } diff --git a/go/vt/vttablet/tabletserver/planbuilder/plan_test.go b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go index 7c1f364cac8..76a4c269dd4 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/plan_test.go +++ b/go/vt/vttablet/tabletserver/planbuilder/plan_test.go @@ -30,6 +30,7 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tableacl" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" @@ -73,6 +74,7 @@ func TestDDLPlan(t *testing.T) { func testPlan(t *testing.T, fileName string) { t.Helper() + parser := sqlparser.NewTestParser() testSchema := loadSchema("schema_test.json") for tcase := range iterateExecFile(fileName) { t.Run(tcase.input, func(t *testing.T) { @@ -81,9 +83,9 @@ func testPlan(t *testing.T, fileName string) { } var plan *Plan var err error - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err == nil { - plan, err = Build(statement, testSchema, "dbName", false) + plan, err = Build(statement, testSchema, "dbName", false, collations.MySQL8()) } PassthroughDMLs = false @@ -111,6 +113,7 @@ func testPlan(t *testing.T, fileName string) { func TestPlanInReservedConn(t 
*testing.T) { testSchema := loadSchema("schema_test.json") + parser := sqlparser.NewTestParser() for tcase := range iterateExecFile("exec_cases.txt") { t.Run(tcase.input, func(t *testing.T) { if strings.Contains(tcase.options, "PassthroughDMLs") { @@ -118,9 +121,9 @@ func TestPlanInReservedConn(t *testing.T) { } var plan *Plan var err error - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err == nil { - plan, err = Build(statement, testSchema, "dbName", false) + plan, err = Build(statement, testSchema, "dbName", false, collations.MySQL8()) } PassthroughDMLs = false @@ -154,6 +157,7 @@ func TestCustom(t *testing.T) { t.Log("No schemas to test") return } + parser := sqlparser.NewTestParser() for _, schemFile := range testSchemas { schem := loadSchema(schemFile) t.Logf("Testing schema %s", schemFile) @@ -167,11 +171,11 @@ func TestCustom(t *testing.T) { for _, file := range files { t.Logf("Testing file %s", file) for tcase := range iterateExecFile(file) { - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err != nil { t.Fatalf("Got error: %v, parsing sql: %v", err.Error(), tcase.input) } - plan, err := Build(statement, schem, "dbName", false) + plan, err := Build(statement, schem, "dbName", false, collations.MySQL8()) var out string if err != nil { out = err.Error() @@ -192,10 +196,11 @@ func TestCustom(t *testing.T) { func TestStreamPlan(t *testing.T) { testSchema := loadSchema("schema_test.json") + parser := sqlparser.NewTestParser() for tcase := range iterateExecFile("stream_cases.txt") { var plan *Plan var err error - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err == nil { plan, err = BuildStreaming(statement, testSchema) } @@ -252,13 +257,14 @@ func TestMessageStreamingPlan(t *testing.T) { func TestLockPlan(t *testing.T) { testSchema := loadSchema("schema_test.json") + parser := sqlparser.NewTestParser() for tcase := 
range iterateExecFile("lock_cases.txt") { t.Run(tcase.input, func(t *testing.T) { var plan *Plan var err error - statement, err := sqlparser.Parse(tcase.input) + statement, err := parser.Parse(tcase.input) if err == nil { - plan, err = Build(statement, testSchema, "dbName", false) + plan, err = Build(statement, testSchema, "dbName", false, collations.MySQL8()) } var out string diff --git a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt index 1e9dbf6ad12..977b3822050 100644 --- a/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt +++ b/go/vt/vttablet/tabletserver/planbuilder/testdata/exec_cases.txt @@ -339,7 +339,7 @@ } ], "FullQuery": "update d set foo = 'foo' where `name` in ('a', 'b') limit :#maxLimit", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # normal update @@ -355,7 +355,7 @@ options:PassthroughDMLs } ], "FullQuery": "update d set foo = 'foo' where `name` in ('a', 'b')", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # cross-db update @@ -370,7 +370,7 @@ options:PassthroughDMLs } ], "FullQuery": "update a.b set foo = 'foo' where `name` in ('a', 'b')", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # update unknown table @@ -385,7 +385,7 @@ options:PassthroughDMLs } ], "FullQuery": "update bogus set `name` = 'foo' where id = 1", - "WhereClause": "where id = 1" + "WhereClause": " where id = 1" } # update unknown table @@ -401,7 +401,7 @@ options:PassthroughDMLs } ], "FullQuery": "update bogus set `name` = 'foo' where id = 1", - "WhereClause": "where id = 1" + "WhereClause": " where id = 1" } # multi-table update @@ -420,7 +420,7 @@ options:PassthroughDMLs } ], "FullQuery": "update a, b set a.`name` = 'foo' where a.id = b.id and b.var = 'test'", - "WhereClause": "where a.id = b.id and b.var = 'test'" + "WhereClause": " 
where a.id = b.id and b.var = 'test'" } # multi-table update @@ -440,7 +440,7 @@ options:PassthroughDMLs } ], "FullQuery": "update a join b on a.id = b.id set a.`name` = 'foo' where b.var = 'test'", - "WhereClause": "where b.var = 'test'" + "WhereClause": " where b.var = 'test'" } @@ -499,7 +499,7 @@ options:PassthroughDMLs } ], "FullQuery": "delete from d where `name` in ('a', 'b') limit :#maxLimit", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # normal delete @@ -515,7 +515,7 @@ options:PassthroughDMLs } ], "FullQuery": "delete from d where `name` in ('a', 'b')", - "WhereClause": "where `name` in ('a', 'b')" + "WhereClause": " where `name` in ('a', 'b')" } # delete unknown table @@ -563,7 +563,7 @@ options:PassthroughDMLs } ], "FullQuery": "delete a, b from a, b where id = 1", - "WhereClause": "where id = 1" + "WhereClause": " where id = 1" } diff --git a/go/vt/vttablet/tabletserver/query_engine.go b/go/vt/vttablet/tabletserver/query_engine.go index 0afe76d14ce..26b89934628 100644 --- a/go/vt/vttablet/tabletserver/query_engine.go +++ b/go/vt/vttablet/tabletserver/query_engine.go @@ -359,11 +359,11 @@ func (qe *QueryEngine) Close() { var errNoCache = errors.New("plan should not be cached") func (qe *QueryEngine) getPlan(curSchema *currentSchema, sql string) (*TabletPlan, error) { - statement, err := sqlparser.Parse(sql) + statement, err := qe.env.SQLParser().Parse(sql) if err != nil { return nil, err } - splan, err := planbuilder.Build(statement, curSchema.tables, qe.env.Config().DB.DBName, qe.env.Config().EnableViews) + splan, err := planbuilder.Build(statement, curSchema.tables, qe.env.Config().DB.DBName, qe.env.Config().EnableViews, qe.env.CollationEnv()) if err != nil { return nil, err } @@ -402,7 +402,7 @@ func (qe *QueryEngine) GetPlan(ctx context.Context, logStats *tabletenv.LogStats } func (qe *QueryEngine) getStreamPlan(curSchema *currentSchema, sql string) (*TabletPlan, error) { - statement, err := 
sqlparser.Parse(sql) + statement, err := qe.env.SQLParser().Parse(sql) if err != nil { return nil, err } @@ -479,7 +479,7 @@ func (qe *QueryEngine) GetConnSetting(ctx context.Context, settings []string) (* cacheKey := SettingsCacheKey(buf.String()) connSetting, _, err := qe.settings.GetOrLoad(cacheKey, 0, func() (*smartconnpool.Setting, error) { // build the setting queries - query, resetQuery, err := planbuilder.BuildSettingQuery(settings) + query, resetQuery, err := planbuilder.BuildSettingQuery(settings, qe.env.SQLParser()) if err != nil { return nil, err } @@ -609,7 +609,7 @@ func (qe *QueryEngine) handleHTTPQueryPlans(response http.ResponseWriter, reques response.Header().Set("Content-Type", "text/plain") qe.ForEachPlan(func(plan *TabletPlan) bool { - response.Write([]byte(fmt.Sprintf("%#v\n", sqlparser.TruncateForUI(plan.Original)))) + response.Write([]byte(fmt.Sprintf("%#v\n", qe.env.SQLParser().TruncateForUI(plan.Original)))) if b, err := json.MarshalIndent(plan.Plan, "", " "); err != nil { response.Write([]byte(err.Error())) } else { @@ -629,7 +629,7 @@ func (qe *QueryEngine) handleHTTPQueryStats(response http.ResponseWriter, reques var qstats []perQueryStats qe.ForEachPlan(func(plan *TabletPlan) bool { var pqstats perQueryStats - pqstats.Query = unicoded(sqlparser.TruncateForUI(plan.Original)) + pqstats.Query = unicoded(qe.env.SQLParser().TruncateForUI(plan.Original)) pqstats.Table = plan.TableName().String() pqstats.Plan = plan.PlanID pqstats.QueryCount, pqstats.Time, pqstats.MysqlTime, pqstats.RowsAffected, pqstats.RowsReturned, pqstats.ErrorCount = plan.Stats() @@ -697,7 +697,7 @@ func (qe *QueryEngine) handleHTTPConsolidations(response http.ResponseWriter, re for _, v := range items { var query string if streamlog.GetRedactDebugUIQueries() { - query, _ = sqlparser.RedactSQLQuery(v.Query) + query, _ = qe.env.SQLParser().RedactSQLQuery(v.Query) } else { query = v.Query } diff --git a/go/vt/vttablet/tabletserver/query_engine_test.go 
b/go/vt/vttablet/tabletserver/query_engine_test.go index f20b5522140..f38d1a9e3cb 100644 --- a/go/vt/vttablet/tabletserver/query_engine_test.go +++ b/go/vt/vttablet/tabletserver/query_engine_test.go @@ -32,6 +32,7 @@ import ( "time" "vitess.io/vitess/go/cache/theine" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/vt/sqlparser" @@ -62,7 +63,7 @@ func TestStrictMode(t *testing.T) { // Test default behavior. config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) - env := tabletenv.NewEnv(config, "TabletServerTest") + env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) qe.se.InitDBConfig(newDBConfigs(db).DbaWithDB()) @@ -355,7 +356,7 @@ func newTestQueryEngine(idleTimeout time.Duration, strict bool, dbcfgs *dbconfig config.OltpReadPool.IdleTimeout = idleTimeout config.OlapReadPool.IdleTimeout = idleTimeout config.TxPool.IdleTimeout = idleTimeout - env := tabletenv.NewEnv(config, "TabletServerTest") + env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) // the integration tests that check cache behavior do not expect a doorkeeper; disable it @@ -455,7 +456,7 @@ func benchmarkPlanCache(b *testing.B, db *fakesqldb.DB, par int) { config := tabletenv.NewDefaultConfig() config.DB = dbcfgs - env := tabletenv.NewEnv(config, "TabletServerTest") + env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) @@ -513,7 +514,7 @@ func TestPlanCachePollution(t *testing.T) { config.DB = dbcfgs // config.LFUQueryCacheSizeBytes = 3 * 1024 * 1024 - env := tabletenv.NewEnv(config, "TabletServerTest") + env := tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) se := 
schema.NewEngine(env) qe := NewQueryEngine(env, se) @@ -829,7 +830,7 @@ func TestAddQueryStats(t *testing.T) { config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(fakesqldb.New(t)) config.EnablePerWorkloadTableMetrics = testcase.enablePerWorkloadTableMetrics - env := tabletenv.NewEnv(config, "TestAddQueryStats_"+testcase.name) + env := tabletenv.NewEnv(config, "TestAddQueryStats_"+testcase.name, collations.MySQL8(), sqlparser.NewTestParser()) se := schema.NewEngine(env) qe := NewQueryEngine(env, se) qe.AddStats(testcase.planType, testcase.tableName, testcase.workload, testcase.tabletType, testcase.queryCount, testcase.duration, testcase.mysqlTime, testcase.rowsAffected, testcase.rowsReturned, testcase.errorCount, testcase.errorCode) @@ -868,9 +869,9 @@ func TestPlanPoolUnsafe(t *testing.T) { } for _, tcase := range tcases { t.Run(tcase.name, func(t *testing.T) { - statement, err := sqlparser.Parse(tcase.query) + statement, err := sqlparser.NewTestParser().Parse(tcase.query) require.NoError(t, err) - plan, err := planbuilder.Build(statement, map[string]*schema.Table{}, "dbName", false) + plan, err := planbuilder.Build(statement, map[string]*schema.Table{}, "dbName", false, collations.MySQL8()) // Plan building will not fail, but it will mark that reserved connection is needed. // checking plan is valid will fail. 
require.NoError(t, err) diff --git a/go/vt/vttablet/tabletserver/query_executor.go b/go/vt/vttablet/tabletserver/query_executor.go index 862d41b115d..e586a7f2d64 100644 --- a/go/vt/vttablet/tabletserver/query_executor.go +++ b/go/vt/vttablet/tabletserver/query_executor.go @@ -28,8 +28,6 @@ import ( "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools/smartconnpool" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/callerid" @@ -616,13 +614,13 @@ func (*QueryExecutor) BeginAgain(ctx context.Context, dc *StatefulConnection) er } func (qre *QueryExecutor) execNextval() (*sqltypes.Result, error) { - env := evalengine.NewExpressionEnv(qre.ctx, qre.bindVars, nil) + env := evalengine.NewExpressionEnv(qre.ctx, qre.bindVars, evalengine.NewEmptyVCursor(qre.tsv.collationEnv, time.Local)) result, err := env.Evaluate(qre.plan.NextCount) if err != nil { return nil, err } tableName := qre.plan.TableName() - v := result.Value(collations.Default()) + v := result.Value(qre.tsv.collationEnv.DefaultConnectionCharset()) inc, err := v.ToInt64() if err != nil || inc < 1 { return nil, vterrors.Errorf(vtrpcpb.Code_INVALID_ARGUMENT, "invalid increment for sequence %s: %s", tableName, v.String()) @@ -759,7 +757,7 @@ func (qre *QueryExecutor) verifyRowCount(count, maxrows int64) error { if warnThreshold > 0 && count > warnThreshold { callerID := callerid.ImmediateCallerIDFromContext(qre.ctx) qre.tsv.Stats().Warnings.Add("ResultsExceeded", 1) - log.Warningf("caller id: %s row count %v exceeds warning threshold %v: %q", callerID.Username, count, warnThreshold, queryAsString(qre.plan.FullQuery.Query, qre.bindVars, qre.tsv.Config().SanitizeLogMessages, true)) + log.Warningf("caller id: %s row count %v exceeds warning threshold %v: %q", callerID.Username, count, warnThreshold, queryAsString(qre.plan.FullQuery.Query, qre.bindVars, qre.tsv.Config().SanitizeLogMessages, true, qre.tsv.SQLParser())) } return nil } @@ 
-1148,7 +1146,7 @@ func (qre *QueryExecutor) GetSchemaDefinitions(tableType querypb.SchemaTableType } func (qre *QueryExecutor) getViewDefinitions(viewNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { - query, err := eschema.GetFetchViewQuery(viewNames) + query, err := eschema.GetFetchViewQuery(viewNames, qre.tsv.SQLParser()) if err != nil { return err } @@ -1156,7 +1154,7 @@ func (qre *QueryExecutor) getViewDefinitions(viewNames []string, callback func(s } func (qre *QueryExecutor) getTableDefinitions(tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { - query, err := eschema.GetFetchTableQuery(tableNames) + query, err := eschema.GetFetchTableQuery(tableNames, qre.tsv.SQLParser()) if err != nil { return err } @@ -1164,7 +1162,7 @@ func (qre *QueryExecutor) getTableDefinitions(tableNames []string, callback func } func (qre *QueryExecutor) getAllDefinitions(tableNames []string, callback func(schemaRes *querypb.GetSchemaResponse) error) error { - query, err := eschema.GetFetchTableAndViewsQuery(tableNames) + query, err := eschema.GetFetchTableAndViewsQuery(tableNames, qre.tsv.SQLParser()) if err != nil { return err } diff --git a/go/vt/vttablet/tabletserver/query_executor_test.go b/go/vt/vttablet/tabletserver/query_executor_test.go index 6ea3c90d989..05888a8b77d 100644 --- a/go/vt/vttablet/tabletserver/query_executor_test.go +++ b/go/vt/vttablet/tabletserver/query_executor_test.go @@ -28,7 +28,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/sync2" @@ -1487,7 +1490,7 @@ func newTestTabletServer(ctx context.Context, flags executorFlags, db *fakesqldb } dbconfigs := newDBConfigs(db) config.DB = dbconfigs - tsv := NewTabletServer(ctx, "TabletServerTest", config, 
memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) target := &querypb.Target{TabletType: topodatapb.TabletType_PRIMARY} err := tsv.StartService(target, dbconfigs, nil /* mysqld */) if config.TwoPCEnable { @@ -1565,7 +1568,7 @@ func initQueryExecutorTestDB(db *fakesqldb.DB) { "varchar|int64"), "Innodb_rows_read|0", )) - sidecardb.AddSchemaInitQueries(db, true) + sidecardb.AddSchemaInitQueries(db, true, sqlparser.NewTestParser()) } func getTestTableFields() []*querypb.Field { @@ -1658,7 +1661,7 @@ func addQueryExecutorSupportedQueries(db *fakesqldb.DB) { fmt.Sprintf(sqlReadAllRedo, "_vt", "_vt"): {}, } - sidecardb.AddSchemaInitQueries(db, true) + sidecardb.AddSchemaInitQueries(db, true, sqlparser.NewTestParser()) for query, result := range queryResultMap { db.AddQuery(query, result) } diff --git a/go/vt/vttablet/tabletserver/query_list.go b/go/vt/vttablet/tabletserver/query_list.go index efe63ab0a8e..a41f23b6aa0 100644 --- a/go/vt/vttablet/tabletserver/query_list.go +++ b/go/vt/vttablet/tabletserver/query_list.go @@ -57,13 +57,16 @@ type QueryList struct { // so have to maintain a list to compare with the actual connection. // and remove appropriately. 
queryDetails map[int64][]*QueryDetail + + parser *sqlparser.Parser } // NewQueryList creates a new QueryList -func NewQueryList(name string) *QueryList { +func NewQueryList(name string, parser *sqlparser.Parser) *QueryList { return &QueryList{ name: name, queryDetails: make(map[int64][]*QueryDetail), + parser: parser, } } @@ -150,7 +153,7 @@ func (ql *QueryList) AppendQueryzRows(rows []QueryDetailzRow) []QueryDetailzRow for _, qd := range qds { query := qd.conn.Current() if streamlog.GetRedactDebugUIQueries() { - query, _ = sqlparser.RedactSQLQuery(query) + query, _ = ql.parser.RedactSQLQuery(query) } row := QueryDetailzRow{ Type: ql.name, diff --git a/go/vt/vttablet/tabletserver/query_list_test.go b/go/vt/vttablet/tabletserver/query_list_test.go index 02b24d86cda..57b672a16e0 100644 --- a/go/vt/vttablet/tabletserver/query_list_test.go +++ b/go/vt/vttablet/tabletserver/query_list_test.go @@ -22,6 +22,8 @@ import ( "time" "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/vt/sqlparser" ) type testConn struct { @@ -44,7 +46,7 @@ func (tc *testConn) IsKilled() bool { } func TestQueryList(t *testing.T) { - ql := NewQueryList("test") + ql := NewQueryList("test", sqlparser.NewTestParser()) connID := int64(1) qd := NewQueryDetail(context.Background(), &testConn{id: connID}) ql.Add(qd) @@ -69,7 +71,7 @@ func TestQueryList(t *testing.T) { } func TestQueryListChangeConnIDInMiddle(t *testing.T) { - ql := NewQueryList("test") + ql := NewQueryList("test", sqlparser.NewTestParser()) connID := int64(1) qd1 := NewQueryDetail(context.Background(), &testConn{id: connID}) ql.Add(qd1) diff --git a/go/vt/vttablet/tabletserver/querylogz.go b/go/vt/vttablet/tabletserver/querylogz.go index 41a40a0720c..8f42192c330 100644 --- a/go/vt/vttablet/tabletserver/querylogz.go +++ b/go/vt/vttablet/tabletserver/querylogz.go @@ -59,7 +59,7 @@ var ( querylogzFuncMap = template.FuncMap{ "stampMicro": func(t time.Time) string { return t.Format(time.StampMicro) }, "cssWrappable": 
logz.Wrappable, - "truncateQuery": sqlparser.TruncateForUI, + "truncateQuery": sqlparser.NewTestParser().TruncateForUI, "unquote": func(s string) string { return strings.Trim(s, "\"") }, } querylogzTmpl = template.Must(template.New("example").Funcs(querylogzFuncMap).Parse(` diff --git a/go/vt/vttablet/tabletserver/queryz.go b/go/vt/vttablet/tabletserver/queryz.go index 151f028ca09..f56402a1fdb 100644 --- a/go/vt/vttablet/tabletserver/queryz.go +++ b/go/vt/vttablet/tabletserver/queryz.go @@ -27,7 +27,6 @@ import ( "vitess.io/vitess/go/acl" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logz" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/planbuilder" ) @@ -157,7 +156,7 @@ func queryzHandler(qe *QueryEngine, w http.ResponseWriter, r *http.Request) { return true } Value := &queryzRow{ - Query: logz.Wrappable(sqlparser.TruncateForUI(plan.Original)), + Query: logz.Wrappable(qe.env.SQLParser().TruncateForUI(plan.Original)), Table: plan.TableName().String(), Plan: plan.PlanID, } diff --git a/go/vt/vttablet/tabletserver/repltracker/reader_test.go b/go/vt/vttablet/tabletserver/repltracker/reader_test.go index 60321cb6164..b46f5545bde 100644 --- a/go/vt/vttablet/tabletserver/repltracker/reader_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/reader_test.go @@ -21,14 +21,15 @@ import ( "testing" "time" - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "vitess.io/vitess/go/vt/proto/query" @@ -140,12 +141,12 @@ func newReader(db *fakesqldb.DB, frozenTime *time.Time) *heartbeatReader { config := tabletenv.NewDefaultConfig() config.ReplicationTracker.Mode = tabletenv.Heartbeat 
config.ReplicationTracker.HeartbeatInterval = time.Second - params, _ := db.ConnParams().MysqlParams() + params := db.ConnParams() cp := *params dbc := dbconfigs.NewTestDBConfigs(cp, cp, "") config.DB = dbc - tr := newHeartbeatReader(tabletenv.NewEnv(config, "ReaderTest")) + tr := newHeartbeatReader(tabletenv.NewEnv(config, "ReaderTest", collations.MySQL8(), sqlparser.NewTestParser())) tr.keyspaceShard = "test:0" if frozenTime != nil { diff --git a/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go b/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go index ee74ed52ab5..b405fb4131d 100644 --- a/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/repltracker_test.go @@ -23,11 +23,13 @@ import ( "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -38,10 +40,10 @@ func TestReplTracker(t *testing.T) { config := tabletenv.NewDefaultConfig() config.ReplicationTracker.Mode = tabletenv.Heartbeat config.ReplicationTracker.HeartbeatInterval = time.Second - params, _ := db.ConnParams().MysqlParams() + params := db.ConnParams() cp := *params config.DB = dbconfigs.NewTestDBConfigs(cp, cp, "") - env := tabletenv.NewEnv(config, "ReplTrackerTest") + env := tabletenv.NewEnv(config, "ReplTrackerTest", collations.MySQL8(), sqlparser.NewTestParser()) alias := &topodatapb.TabletAlias{ Cell: "cell", Uid: 1, diff --git a/go/vt/vttablet/tabletserver/repltracker/writer_test.go b/go/vt/vttablet/tabletserver/repltracker/writer_test.go index 664a0464b78..ade70c6f669 100644 --- a/go/vt/vttablet/tabletserver/repltracker/writer_test.go +++ b/go/vt/vttablet/tabletserver/repltracker/writer_test.go @@ 
-23,10 +23,12 @@ import ( "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/dbconfigs" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -68,11 +70,11 @@ func newTestWriter(db *fakesqldb.DB, frozenTime *time.Time) *heartbeatWriter { config.ReplicationTracker.Mode = tabletenv.Heartbeat config.ReplicationTracker.HeartbeatInterval = time.Second - params, _ := db.ConnParams().MysqlParams() + params := db.ConnParams() cp := *params dbc := dbconfigs.NewTestDBConfigs(cp, cp, "") - tw := newHeartbeatWriter(tabletenv.NewEnv(config, "WriterTest"), &topodatapb.TabletAlias{Cell: "test", Uid: 1111}) + tw := newHeartbeatWriter(tabletenv.NewEnv(config, "WriterTest", collations.MySQL8(), sqlparser.NewTestParser()), &topodatapb.TabletAlias{Cell: "test", Uid: 1111}) tw.keyspaceShard = "test:0" if frozenTime != nil { diff --git a/go/vt/vttablet/tabletserver/schema/db.go b/go/vt/vttablet/tabletserver/schema/db.go index 5699ffc1bde..4bea80c4010 100644 --- a/go/vt/vttablet/tabletserver/schema/db.go +++ b/go/vt/vttablet/tabletserver/schema/db.go @@ -89,7 +89,7 @@ where table_schema = database() and table_name in ::viewNames` ) // reloadTablesDataInDB reloads teh tables information we have stored in our database we use for schema-tracking. -func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Table, droppedTables []string) error { +func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Table, droppedTables []string, parser *sqlparser.Parser) error { // No need to do anything if we have no tables to refresh or drop. 
if len(tables) == 0 && len(droppedTables) == 0 { return nil @@ -117,7 +117,7 @@ func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Ta } // Generate the queries to delete and insert table data. - clearTableParsedQuery, err := generateFullQuery(deleteFromSchemaEngineTablesTable) + clearTableParsedQuery, err := generateFullQuery(deleteFromSchemaEngineTablesTable, parser) if err != nil { return err } @@ -126,7 +126,7 @@ func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Ta return err } - insertTablesParsedQuery, err := generateFullQuery(insertTableIntoSchemaEngineTables) + insertTablesParsedQuery, err := generateFullQuery(insertTableIntoSchemaEngineTables, parser) if err != nil { return err } @@ -162,8 +162,8 @@ func reloadTablesDataInDB(ctx context.Context, conn *connpool.Conn, tables []*Ta } // generateFullQuery generates the full query from the query as a string. -func generateFullQuery(query string) (*sqlparser.ParsedQuery, error) { - stmt, err := sqlparser.Parse( +func generateFullQuery(query string, parser *sqlparser.Parser) (*sqlparser.ParsedQuery, error) { + stmt, err := parser.Parse( sqlparser.BuildParsedQuery(query, sidecar.GetIdentifier(), sidecar.GetIdentifier()).Query) if err != nil { return nil, err @@ -174,7 +174,7 @@ func generateFullQuery(query string) (*sqlparser.ParsedQuery, error) { } // reloadViewsDataInDB reloads teh views information we have stored in our database we use for schema-tracking. -func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Table, droppedViews []string) error { +func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Table, droppedViews []string, parser *sqlparser.Parser) error { // No need to do anything if we have no views to refresh or drop. 
if len(views) == 0 && len(droppedViews) == 0 { return nil @@ -213,7 +213,7 @@ func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Tabl return nil }, func() *sqltypes.Result { return &sqltypes.Result{} }, - 1000, + 1000, parser, ) if err != nil { return err @@ -221,7 +221,7 @@ func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Tabl } // Generate the queries to delete and insert view data. - clearViewParsedQuery, err := generateFullQuery(deleteFromSchemaEngineViewsTable) + clearViewParsedQuery, err := generateFullQuery(deleteFromSchemaEngineViewsTable, parser) if err != nil { return err } @@ -230,7 +230,7 @@ func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Tabl return err } - insertViewsParsedQuery, err := generateFullQuery(insertViewIntoSchemaEngineViews) + insertViewsParsedQuery, err := generateFullQuery(insertViewIntoSchemaEngineViews, parser) if err != nil { return err } @@ -266,8 +266,8 @@ func reloadViewsDataInDB(ctx context.Context, conn *connpool.Conn, views []*Tabl } // getViewDefinition gets the viewDefinition for the given views. 
-func getViewDefinition(ctx context.Context, conn *connpool.Conn, bv map[string]*querypb.BindVariable, callback func(qr *sqltypes.Result) error, alloc func() *sqltypes.Result, bufferSize int) error { - viewsDefParsedQuery, err := generateFullQuery(fetchViewDefinitions) +func getViewDefinition(ctx context.Context, conn *connpool.Conn, bv map[string]*querypb.BindVariable, callback func(qr *sqltypes.Result) error, alloc func() *sqltypes.Result, bufferSize int, parser *sqlparser.Parser) error { + viewsDefParsedQuery, err := generateFullQuery(fetchViewDefinitions, parser) if err != nil { return err } @@ -358,7 +358,7 @@ func (se *Engine) getMismatchedTableNames(ctx context.Context, conn *connpool.Co } // reloadDataInDB reloads the schema tracking data in the database -func reloadDataInDB(ctx context.Context, conn *connpool.Conn, altered []*Table, created []*Table, dropped []*Table) error { +func reloadDataInDB(ctx context.Context, conn *connpool.Conn, altered []*Table, created []*Table, dropped []*Table, parser *sqlparser.Parser) error { // tablesToReload and viewsToReload stores the tables and views that need reloading and storing in our MySQL database. var tablesToReload, viewsToReload []*Table // droppedTables, droppedViews stores the list of tables and views we need to delete, respectively. @@ -382,19 +382,19 @@ func reloadDataInDB(ctx context.Context, conn *connpool.Conn, altered []*Table, } } - if err := reloadTablesDataInDB(ctx, conn, tablesToReload, droppedTables); err != nil { + if err := reloadTablesDataInDB(ctx, conn, tablesToReload, droppedTables, parser); err != nil { return err } - if err := reloadViewsDataInDB(ctx, conn, viewsToReload, droppedViews); err != nil { + if err := reloadViewsDataInDB(ctx, conn, viewsToReload, droppedViews, parser); err != nil { return err } return nil } // GetFetchViewQuery gets the fetch query to run for getting the listed views. If no views are provided, then all the views are fetched. 
-func GetFetchViewQuery(viewNames []string) (string, error) { +func GetFetchViewQuery(viewNames []string, parser *sqlparser.Parser) (string, error) { if len(viewNames) == 0 { - parsedQuery, err := generateFullQuery(fetchViews) + parsedQuery, err := generateFullQuery(fetchViews, parser) if err != nil { return "", err } @@ -407,7 +407,7 @@ func GetFetchViewQuery(viewNames []string) (string, error) { } bv := map[string]*querypb.BindVariable{"viewNames": viewsBV} - parsedQuery, err := generateFullQuery(fetchUpdatedViews) + parsedQuery, err := generateFullQuery(fetchUpdatedViews, parser) if err != nil { return "", err } @@ -415,9 +415,9 @@ func GetFetchViewQuery(viewNames []string) (string, error) { } // GetFetchTableQuery gets the fetch query to run for getting the listed tables. If no tables are provided, then all the tables are fetched. -func GetFetchTableQuery(tableNames []string) (string, error) { +func GetFetchTableQuery(tableNames []string, parser *sqlparser.Parser) (string, error) { if len(tableNames) == 0 { - parsedQuery, err := generateFullQuery(fetchTables) + parsedQuery, err := generateFullQuery(fetchTables, parser) if err != nil { return "", err } @@ -430,7 +430,7 @@ func GetFetchTableQuery(tableNames []string) (string, error) { } bv := map[string]*querypb.BindVariable{"tableNames": tablesBV} - parsedQuery, err := generateFullQuery(fetchUpdatedTables) + parsedQuery, err := generateFullQuery(fetchUpdatedTables, parser) if err != nil { return "", err } @@ -438,9 +438,9 @@ func GetFetchTableQuery(tableNames []string) (string, error) { } // GetFetchTableAndViewsQuery gets the fetch query to run for getting the listed tables and views. If no table names are provided, then all the tables and views are fetched. 
-func GetFetchTableAndViewsQuery(tableNames []string) (string, error) { +func GetFetchTableAndViewsQuery(tableNames []string, parser *sqlparser.Parser) (string, error) { if len(tableNames) == 0 { - parsedQuery, err := generateFullQuery(fetchTablesAndViews) + parsedQuery, err := generateFullQuery(fetchTablesAndViews, parser) if err != nil { return "", err } @@ -453,7 +453,7 @@ func GetFetchTableAndViewsQuery(tableNames []string) (string, error) { } bv := map[string]*querypb.BindVariable{"tableNames": tablesBV} - parsedQuery, err := generateFullQuery(fetchUpdatedTablesAndViews) + parsedQuery, err := generateFullQuery(fetchUpdatedTablesAndViews, parser) if err != nil { return "", err } diff --git a/go/vt/vttablet/tabletserver/schema/db_test.go b/go/vt/vttablet/tabletserver/schema/db_test.go index ac6999d309a..742e2521854 100644 --- a/go/vt/vttablet/tabletserver/schema/db_test.go +++ b/go/vt/vttablet/tabletserver/schema/db_test.go @@ -26,12 +26,14 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/maps2" - + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/dbconfigs" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" + "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) var ( @@ -81,7 +83,7 @@ func TestGenerateFullQuery(t *testing.T) { tt.wantQuery = tt.query } - got, err := generateFullQuery(tt.query) + got, err := generateFullQuery(tt.query, sqlparser.NewTestParser()) if tt.wantErr != "" { require.EqualError(t, err, tt.wantErr) return @@ -96,7 +98,8 @@ func TestGenerateFullQuery(t *testing.T) { func TestGetCreateStatement(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(nil, "TestGetCreateStatement", collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := 
connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Success view @@ -131,7 +134,8 @@ func TestGetCreateStatement(t *testing.T) { func TestGetChangedViewNames(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(nil, "TestGetChangedViewNames", collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Success @@ -164,7 +168,8 @@ func TestGetChangedViewNames(t *testing.T) { func TestGetViewDefinition(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(nil, "TestGetViewDefinition", collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) viewsBV, err := sqltypes.BuildBindVariable([]string{"v1", "lead"}) @@ -209,7 +214,7 @@ func collectGetViewDefinitions(conn *connpool.Conn, bv map[string]*querypb.BindV return nil }, func() *sqltypes.Result { return &sqltypes.Result{} - }, 1000) + }, 1000, sqlparser.NewTestParser()) return viewDefinitions, err } @@ -336,7 +341,8 @@ func TestGetMismatchedTableNames(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(nil, tc.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) if tc.dbError != "" { @@ -456,7 +462,8 @@ func TestReloadTablesInDB(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := 
connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(nil, tc.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Add queries with the expected results and errors. @@ -467,7 +474,7 @@ func TestReloadTablesInDB(t *testing.T) { db.AddRejectedQuery(query, errorToThrow) } - err = reloadTablesDataInDB(context.Background(), conn, tc.tablesToReload, tc.tablesToDelete) + err = reloadTablesDataInDB(context.Background(), conn, tc.tablesToReload, tc.tablesToDelete, sqlparser.NewTestParser()) if tc.expectedError != "" { require.ErrorContains(t, err, tc.expectedError) return @@ -588,7 +595,8 @@ func TestReloadViewsInDB(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(nil, tc.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Add queries with the expected results and errors. 
@@ -599,7 +607,7 @@ func TestReloadViewsInDB(t *testing.T) { db.AddRejectedQuery(query, errorToThrow) } - err = reloadViewsDataInDB(context.Background(), conn, tc.viewsToReload, tc.viewsToDelete) + err = reloadViewsDataInDB(context.Background(), conn, tc.viewsToReload, tc.viewsToDelete, sqlparser.NewTestParser()) if tc.expectedError != "" { require.ErrorContains(t, err, tc.expectedError) return @@ -878,7 +886,8 @@ func TestReloadDataInDB(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(nil, tc.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) // Add queries with the expected results and errors. @@ -889,7 +898,7 @@ func TestReloadDataInDB(t *testing.T) { db.AddRejectedQuery(query, errorToThrow) } - err = reloadDataInDB(context.Background(), conn, tc.altered, tc.created, tc.dropped) + err = reloadDataInDB(context.Background(), conn, tc.altered, tc.created, tc.dropped, sqlparser.NewTestParser()) if tc.expectedError != "" { require.ErrorContains(t, err, tc.expectedError) return @@ -920,7 +929,7 @@ func TestGetFetchViewQuery(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - query, err := GetFetchViewQuery(testcase.viewNames) + query, err := GetFetchViewQuery(testcase.viewNames, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, testcase.expectedQuery, query) }) @@ -947,7 +956,7 @@ func TestGetFetchTableQuery(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - query, err := GetFetchTableQuery(testcase.tableNames) + query, err := GetFetchTableQuery(testcase.tableNames, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, testcase.expectedQuery, query) }) @@ 
-974,7 +983,7 @@ func TestGetFetchTableAndViewsQuery(t *testing.T) { for _, testcase := range testcases { t.Run(testcase.name, func(t *testing.T) { - query, err := GetFetchTableAndViewsQuery(testcase.tableNames) + query, err := GetFetchTableAndViewsQuery(testcase.tableNames, sqlparser.NewTestParser()) require.NoError(t, err) require.Equal(t, testcase.expectedQuery, query) }) diff --git a/go/vt/vttablet/tabletserver/schema/engine.go b/go/vt/vttablet/tabletserver/schema/engine.go index 9f973324302..62a1e9afa2b 100644 --- a/go/vt/vttablet/tabletserver/schema/engine.go +++ b/go/vt/vttablet/tabletserver/schema/engine.go @@ -29,6 +29,7 @@ import ( "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/maps2" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/mysql/sqlerror" @@ -161,7 +162,7 @@ func (se *Engine) syncSidecarDB(ctx context.Context, conn *dbconnpool.DBConnecti } return conn.ExecuteFetch(query, maxRows, true) } - if err := sidecardb.Init(ctx, exec); err != nil { + if err := sidecardb.Init(ctx, exec, se.env.SQLParser()); err != nil { log.Errorf("Error in sidecardb.Init: %+v", err) if se.env.Config().DB.HasGlobalSettings() { log.Warning("Ignoring sidecardb.Init error for unmanaged tablets") @@ -498,7 +499,7 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { log.V(2).Infof("Reading schema for table: %s", tableName) tableType := row[1].String() - table, err := LoadTable(conn, se.cp.DBName(), tableName, tableType, row[3].ToString()) + table, err := LoadTable(conn, se.cp.DBName(), tableName, tableType, row[3].ToString(), se.env.CollationEnv()) if err != nil { if isView := strings.Contains(tableType, tmutils.TableView); isView { log.Warningf("Failed reading schema for the view: %s, error: %v", tableName, err) @@ -535,7 +536,7 @@ func (se *Engine) reload(ctx context.Context, includeStats bool) error { if shouldUseDatabase { // If reloadDataInDB succeeds, then we don't want to prevent 
sending the broadcast notification. // So, we do this step in the end when we can receive no more errors that fail the reload operation. - err = reloadDataInDB(ctx, conn.Conn, altered, created, dropped) + err = reloadDataInDB(ctx, conn.Conn, altered, created, dropped, se.env.SQLParser()) if err != nil { log.Errorf("error in updating schema information in Engine.reload() - %v", err) } @@ -827,6 +828,7 @@ func NewEngineForTests() *Engine { isOpen: true, tables: make(map[string]*Table), historian: newHistorian(false, 0, nil), + env: tabletenv.NewEnv(tabletenv.NewDefaultConfig(), "SchemaEngineForTests", collations.MySQL8(), sqlparser.NewTestParser()), } return se } @@ -842,6 +844,14 @@ func (se *Engine) GetDBConnector() dbconfigs.Connector { return se.cp } +func (se *Engine) CollationEnv() *collations.Environment { + return se.env.CollationEnv() +} + +func (se *Engine) SQLParser() *sqlparser.Parser { + return se.env.SQLParser() +} + func extractNamesFromTablesList(tables []*Table) []string { var tableNames []string for _, table := range tables { diff --git a/go/vt/vttablet/tabletserver/schema/engine_test.go b/go/vt/vttablet/tabletserver/schema/engine_test.go index 2eb117cba36..0e9f0dd9162 100644 --- a/go/vt/vttablet/tabletserver/schema/engine_test.go +++ b/go/vt/vttablet/tabletserver/schema/engine_test.go @@ -32,13 +32,12 @@ import ( "github.com/stretchr/testify/require" "vitess.io/vitess/go/constants/sidecar" - - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/mysql/sqlerror" - "vitess.io/vitess/go/event/syslogger" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/test/utils" @@ -581,13 +580,13 @@ func newEngine(reloadTime time.Duration, idleTimeout time.Duration, schemaMaxAge config.OlapReadPool.IdleTimeout = idleTimeout 
config.TxPool.IdleTimeout = idleTimeout config.SchemaVersionMaxAgeSeconds = schemaMaxAgeSeconds - se := NewEngine(tabletenv.NewEnv(config, "SchemaTest")) + se := NewEngine(tabletenv.NewEnv(config, "SchemaTest", collations.MySQL8(), sqlparser.NewTestParser())) se.InitDBConfig(newDBConfigs(db).DbaWithDB()) return se } func newDBConfigs(db *fakesqldb.DB) *dbconfigs.DBConfigs { - params, _ := db.ConnParams().MysqlParams() + params := db.ConnParams() cp := *params return dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") } @@ -765,7 +764,8 @@ func TestEngineMysqlTime(t *testing.T) { t.Run(tt.name, func(t *testing.T) { se := &Engine{} db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(nil, tt.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) if tt.timeStampErr != nil { @@ -871,7 +871,8 @@ func TestEnginePopulatePrimaryKeys(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(nil, tt.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) se := &Engine{} @@ -932,7 +933,8 @@ func TestEngineUpdateInnoDBRowsRead(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(nil, tt.name, collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) se := &Engine{} se.innoDbReadRowsCounter = 
stats.NewCounter("TestEngineUpdateInnoDBRowsRead-"+tt.name, "") @@ -959,7 +961,8 @@ func TestEngineUpdateInnoDBRowsRead(t *testing.T) { // TestEngineGetTableData tests the functionality of getTableData function func TestEngineGetTableData(t *testing.T) { db := fakesqldb.New(t) - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(nil, "TestEngineGetTableData", collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) tests := []struct { @@ -1133,7 +1136,8 @@ func TestEngineReload(t *testing.T) { cfg := tabletenv.NewDefaultConfig() cfg.DB = newDBConfigs(db) cfg.SignalWhenSchemaChange = true - conn, err := connpool.NewConn(context.Background(), db.ConnParams(), nil, nil) + env := tabletenv.NewEnv(nil, "TestEngineReload", collations.MySQL8(), sqlparser.NewTestParser()) + conn, err := connpool.NewConn(context.Background(), dbconfigs.New(db.ConnParams()), nil, nil, env) require.NoError(t, err) se := newEngine(10*time.Second, 10*time.Second, 0, db) diff --git a/go/vt/vttablet/tabletserver/schema/historian_test.go b/go/vt/vttablet/tabletserver/schema/historian_test.go index f66306966de..1d66ecefd97 100644 --- a/go/vt/vttablet/tabletserver/schema/historian_test.go +++ b/go/vt/vttablet/tabletserver/schema/historian_test.go @@ -39,7 +39,7 @@ func getTable(name string, fieldNames []string, fieldTypes []querypb.Type, pks [ fields := []*querypb.Field{} for i := range fieldNames { typ := fieldTypes[i] - cs := collations.DefaultCollationForType(typ) + cs := collations.CollationForType(typ, collations.MySQL8().DefaultConnectionCharset()) fields = append(fields, &querypb.Field{ Name: fieldNames[i], Type: typ, diff --git a/go/vt/vttablet/tabletserver/schema/load_table.go b/go/vt/vttablet/tabletserver/schema/load_table.go index 687672a4a02..e4e464f3fce 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table.go +++ 
b/go/vt/vttablet/tabletserver/schema/load_table.go @@ -34,7 +34,7 @@ import ( ) // LoadTable creates a Table from the schema info in the database. -func LoadTable(conn *connpool.PooledConn, databaseName, tableName, tableType string, comment string) (*Table, error) { +func LoadTable(conn *connpool.PooledConn, databaseName, tableName, tableType string, comment string, collationEnv *collations.Environment) (*Table, error) { ta := NewTable(tableName, NoType) sqlTableName := sqlparser.String(ta.Name) if err := fetchColumns(ta, conn, databaseName, sqlTableName); err != nil { @@ -45,7 +45,7 @@ func LoadTable(conn *connpool.PooledConn, databaseName, tableName, tableType str ta.Type = Sequence ta.SequenceInfo = &SequenceInfo{} case strings.Contains(comment, "vitess_message"): - if err := loadMessageInfo(ta, comment); err != nil { + if err := loadMessageInfo(ta, comment, collationEnv); err != nil { return nil, err } ta.Type = Message @@ -68,7 +68,7 @@ func fetchColumns(ta *Table, conn *connpool.PooledConn, databaseName, sqlTableNa return nil } -func loadMessageInfo(ta *Table, comment string) error { +func loadMessageInfo(ta *Table, comment string, collationEnv *collations.Environment) error { ta.MessageInfo = &MessageInfo{} // Extract keyvalues. keyvals := make(map[string]string) @@ -152,7 +152,7 @@ func loadMessageInfo(ta *Table, comment string) error { if specifiedCols[0] != "id" { return fmt.Errorf("vt_message_cols must begin with id: %s", ta.Name.String()) } - ta.MessageInfo.Fields = getSpecifiedMessageFields(ta.Fields, specifiedCols) + ta.MessageInfo.Fields = getSpecifiedMessageFields(ta.Fields, specifiedCols, collationEnv) } else { ta.MessageInfo.Fields = getDefaultMessageFields(ta.Fields, hiddenCols) } @@ -211,11 +211,11 @@ func getDefaultMessageFields(tableFields []*querypb.Field, hiddenCols map[string // we have already validated that all the specified columns exist in the table schema, so we don't need to // check again and possibly return an error here. 
-func getSpecifiedMessageFields(tableFields []*querypb.Field, specifiedCols []string) []*querypb.Field { +func getSpecifiedMessageFields(tableFields []*querypb.Field, specifiedCols []string, collationEnv *collations.Environment) []*querypb.Field { fields := make([]*querypb.Field, 0, len(specifiedCols)) for _, col := range specifiedCols { for _, field := range tableFields { - if res, _ := evalengine.NullsafeCompare(sqltypes.NewVarChar(field.Name), sqltypes.NewVarChar(strings.TrimSpace(col)), collations.Default()); res == 0 { + if res, _ := evalengine.NullsafeCompare(sqltypes.NewVarChar(field.Name), sqltypes.NewVarChar(strings.TrimSpace(col)), collationEnv, collationEnv.DefaultConnectionCharset()); res == 0 { fields = append(fields, field) break } diff --git a/go/vt/vttablet/tabletserver/schema/load_table_test.go b/go/vt/vttablet/tabletserver/schema/load_table_test.go index 088afac3720..5ae79193b36 100644 --- a/go/vt/vttablet/tabletserver/schema/load_table_test.go +++ b/go/vt/vttablet/tabletserver/schema/load_table_test.go @@ -23,7 +23,9 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/dbconfigs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -227,13 +229,13 @@ func TestLoadTableMessage(t *testing.T) { func newTestLoadTable(tableType string, comment string, db *fakesqldb.DB) (*Table, error) { ctx := context.Background() - appParams := db.ConnParams() - dbaParams := db.ConnParams() + appParams := dbconfigs.New(db.ConnParams()) + dbaParams := dbconfigs.New(db.ConnParams()) cfg := tabletenv.ConnPoolConfig{ Size: 2, IdleTimeout: 10 * time.Second, } - connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest"), "", cfg) + connPool := connpool.NewPool(tabletenv.NewEnv(nil, "SchemaTest", collations.MySQL8(), sqlparser.NewTestParser()), "", cfg) connPool.Open(appParams, dbaParams, appParams) conn, err := connPool.Get(ctx, nil) if err != nil { @@ -241,7 +243,7 
@@ func newTestLoadTable(tableType string, comment string, db *fakesqldb.DB) (*Tabl } defer conn.Recycle() - return LoadTable(conn, "fakesqldb", "test_table", tableType, comment) + return LoadTable(conn, "fakesqldb", "test_table", tableType, comment, collations.MySQL8()) } func mockLoadTableQueries(db *fakesqldb.DB) { diff --git a/go/vt/vttablet/tabletserver/schema/tracker.go b/go/vt/vttablet/tabletserver/schema/tracker.go index 684bb6d317d..58019c4c018 100644 --- a/go/vt/vttablet/tabletserver/schema/tracker.go +++ b/go/vt/vttablet/tabletserver/schema/tracker.go @@ -134,12 +134,12 @@ func (tr *Tracker) process(ctx context.Context) { gtid = event.Gtid } if event.Type == binlogdatapb.VEventType_DDL && - MustReloadSchemaOnDDL(event.Statement, tr.engine.cp.DBName()) { + MustReloadSchemaOnDDL(event.Statement, tr.engine.cp.DBName(), tr.env.SQLParser()) { if err := tr.schemaUpdated(gtid, event.Statement, event.Timestamp); err != nil { tr.env.Stats().ErrorCounters.Add(vtrpcpb.Code_INTERNAL.String(), 1) log.Errorf("Error updating schema: %s for ddl %s, gtid %s", - sqlparser.TruncateForLog(err.Error()), event.Statement, gtid) + tr.env.SQLParser().TruncateForLog(err.Error()), event.Statement, gtid) } } } @@ -248,8 +248,8 @@ func encodeString(in string) string { } // MustReloadSchemaOnDDL returns true if the ddl is for the db which is part of the workflow and is not an online ddl artifact -func MustReloadSchemaOnDDL(sql string, dbname string) bool { - ast, err := sqlparser.Parse(sql) +func MustReloadSchemaOnDDL(sql string, dbname string, parser *sqlparser.Parser) bool { + ast, err := parser.Parse(sql) if err != nil { return false } diff --git a/go/vt/vttablet/tabletserver/schema/tracker_test.go b/go/vt/vttablet/tabletserver/schema/tracker_test.go index 2029235b2e3..8b6f1458283 100644 --- a/go/vt/vttablet/tabletserver/schema/tracker_test.go +++ b/go/vt/vttablet/tabletserver/schema/tracker_test.go @@ -17,14 +17,15 @@ limitations under the License. 
package schema import ( + "context" "testing" "github.com/stretchr/testify/require" - "context" - + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" ) @@ -78,7 +79,7 @@ func TestTracker(t *testing.T) { } config := se.env.Config() config.TrackSchemaVersions = true - env := tabletenv.NewEnv(config, "TrackerTest") + env := tabletenv.NewEnv(config, "TrackerTest", collations.MySQL8(), sqlparser.NewTestParser()) initial := env.Stats().ErrorCounters.Counts()["INTERNAL"] tracker := NewTracker(env, vs, se) tracker.Open() @@ -122,7 +123,7 @@ func TestTrackerShouldNotInsertInitialSchema(t *testing.T) { } config := se.env.Config() config.TrackSchemaVersions = true - env := tabletenv.NewEnv(config, "TrackerTest") + env := tabletenv.NewEnv(config, "TrackerTest", collations.MySQL8(), sqlparser.NewTestParser()) tracker := NewTracker(env, vs, se) tracker.Open() <-vs.done @@ -170,7 +171,7 @@ func TestMustReloadSchemaOnDDL(t *testing.T) { } for _, tc := range testcases { t.Run("", func(t *testing.T) { - require.Equal(t, tc.want, MustReloadSchemaOnDDL(tc.query, tc.dbname)) + require.Equal(t, tc.want, MustReloadSchemaOnDDL(tc.query, tc.dbname, sqlparser.NewTestParser())) }) } } diff --git a/go/vt/vttablet/tabletserver/state_manager_test.go b/go/vt/vttablet/tabletserver/state_manager_test.go index 23e70a66760..4b88ce734d7 100644 --- a/go/vt/vttablet/tabletserver/state_manager_test.go +++ b/go/vt/vttablet/tabletserver/state_manager_test.go @@ -24,17 +24,19 @@ import ( "testing" "time" - "google.golang.org/protobuf/proto" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/mysql/fakesqldb" - "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" + 
"vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" ) @@ -397,6 +399,10 @@ func (k *killableConn) Kill(message string, elapsed time.Duration) error { return nil } +func (k *killableConn) SQLParser() *sqlparser.Parser { + return sqlparser.NewTestParser() +} + func TestStateManagerShutdownGracePeriod(t *testing.T) { sm := newTestStateManager(t) defer sm.StopService() @@ -704,11 +710,12 @@ func verifySubcomponent(t *testing.T, order int64, component any, state testStat func newTestStateManager(t *testing.T) *stateManager { order.Store(0) config := tabletenv.NewDefaultConfig() - env := tabletenv.NewEnv(config, "StateManagerTest") + env := tabletenv.NewEnv(config, "StateManagerTest", collations.MySQL8(), sqlparser.NewTestParser()) + parser := sqlparser.NewTestParser() sm := &stateManager{ - statelessql: NewQueryList("stateless"), - statefulql: NewQueryList("stateful"), - olapql: NewQueryList("olap"), + statelessql: NewQueryList("stateless", parser), + statefulql: NewQueryList("stateful", parser), + olapql: NewQueryList("olap", parser), hs: newHealthStreamer(env, &topodatapb.TabletAlias{}, schema.NewEngine(env)), se: &testSchemaEngine{}, rt: &testReplTracker{lag: 1 * time.Second}, @@ -724,7 +731,7 @@ func newTestStateManager(t *testing.T) *stateManager { tableGC: &testTableGC{}, } sm.Init(env, &querypb.Target{}) - sm.hs.InitDBConfig(&querypb.Target{}, fakesqldb.New(t).ConnParams()) + sm.hs.InitDBConfig(&querypb.Target{}, dbconfigs.New(fakesqldb.New(t).ConnParams())) log.Infof("returning sm: %p", sm) return sm } diff --git a/go/vt/vttablet/tabletserver/stateful_connection.go b/go/vt/vttablet/tabletserver/stateful_connection.go index 
739ed5c4295..067f2194655 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection.go +++ b/go/vt/vttablet/tabletserver/stateful_connection.go @@ -26,6 +26,7 @@ import ( "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -184,11 +185,11 @@ func (sc *StatefulConnection) Renew() error { } // String returns a printable version of the connection info. -func (sc *StatefulConnection) String(sanitize bool) string { +func (sc *StatefulConnection) String(sanitize bool, parser *sqlparser.Parser) string { return fmt.Sprintf( "%v\t%s", sc.ConnID, - sc.txProps.String(sanitize), + sc.txProps.String(sanitize, parser), ) } diff --git a/go/vt/vttablet/tabletserver/stateful_connection_pool.go b/go/vt/vttablet/tabletserver/stateful_connection_pool.go index ce6f917610e..a28d153dca1 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection_pool.go +++ b/go/vt/vttablet/tabletserver/stateful_connection_pool.go @@ -93,7 +93,7 @@ func (sf *StatefulConnectionPool) Close() { if conn.IsInTransaction() { thing = "transaction" } - log.Warningf("killing %s for shutdown: %s", thing, conn.String(sf.env.Config().SanitizeLogMessages)) + log.Warningf("killing %s for shutdown: %s", thing, conn.String(sf.env.Config().SanitizeLogMessages, sf.env.SQLParser())) sf.env.Stats().InternalErrors.Add("StrayTransactions", 1) conn.Close() conn.Releasef("pool closed") diff --git a/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go b/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go index b9ea4dfc185..a84052f1d0f 100644 --- a/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go +++ b/go/vt/vttablet/tabletserver/stateful_connection_pool_test.go @@ -25,6 +25,7 @@ import ( "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + 
"vitess.io/vitess/go/vt/dbconfigs" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" ) @@ -37,7 +38,8 @@ func TestActivePoolClientRowsFound(t *testing.T) { db.AddQuery("begin", &sqltypes.Result{}) pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) startNormalSize := pool.conns.Available() startFoundRowsSize := pool.foundRowsPool.Available() @@ -63,7 +65,8 @@ func TestActivePoolForAllTxProps(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) conn1, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) require.NoError(t, err) conn1.txProps = &tx.Properties{} @@ -91,7 +94,8 @@ func TestStatefulPoolShutdownNonTx(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) // conn1 non-tx, not in use. 
conn1, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) @@ -131,7 +135,8 @@ func TestStatefulPoolShutdownAll(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) // conn1 not in use conn1, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) @@ -157,7 +162,8 @@ func TestActivePoolGetConnNonExistentTransaction(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) _, err := pool.GetAndLock(12345, "for query") require.EqualError(t, err, "not found") } @@ -167,7 +173,8 @@ func TestExecWithAbortedCtx(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) conn, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) require.NoError(t, err) cancel() @@ -181,7 +188,8 @@ func TestExecWithDbconnClosed(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) conn, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) require.NoError(t, err) conn.Close() @@ -196,7 +204,8 @@ func TestExecWithDbconnClosedHavingTx(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) conn, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) require.NoError(t, err) conn.txProps = &tx.Properties{Conclusion: "foobar"} @@ -212,7 +221,8 @@ func 
TestFailOnConnectionRegistering(t *testing.T) { db := fakesqldb.New(t) defer db.Close() pool := newActivePool() - pool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + pool.Open(params, params, params) conn, err := pool.NewConn(ctx, &querypb.ExecuteOptions{}, nil) require.NoError(t, err) defer conn.Close() diff --git a/go/vt/vttablet/tabletserver/tabletenv/env.go b/go/vt/vttablet/tabletserver/tabletenv/env.go index 6ae38138922..8d53ad5d09c 100644 --- a/go/vt/vttablet/tabletserver/tabletenv/env.go +++ b/go/vt/vttablet/tabletserver/tabletenv/env.go @@ -19,9 +19,11 @@ limitations under the License. package tabletenv import ( + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/tb" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" ) // Env defines the functions supported by TabletServer @@ -31,30 +33,38 @@ type Env interface { Config() *TabletConfig Exporter() *servenv.Exporter Stats() *Stats + SQLParser() *sqlparser.Parser LogError() + CollationEnv() *collations.Environment } type testEnv struct { - config *TabletConfig - exporter *servenv.Exporter - stats *Stats + config *TabletConfig + exporter *servenv.Exporter + stats *Stats + collationEnv *collations.Environment + parser *sqlparser.Parser } // NewEnv creates an Env that can be used for tabletserver subcomponents // without an actual TabletServer. 
-func NewEnv(config *TabletConfig, exporterName string) Env { +func NewEnv(config *TabletConfig, exporterName string, collationEnv *collations.Environment, parser *sqlparser.Parser) Env { exporter := servenv.NewExporter(exporterName, "Tablet") return &testEnv{ - config: config, - exporter: exporter, - stats: NewStats(exporter), + config: config, + exporter: exporter, + stats: NewStats(exporter), + collationEnv: collationEnv, + parser: parser, } } -func (*testEnv) CheckMySQL() {} -func (te *testEnv) Config() *TabletConfig { return te.config } -func (te *testEnv) Exporter() *servenv.Exporter { return te.exporter } -func (te *testEnv) Stats() *Stats { return te.stats } +func (*testEnv) CheckMySQL() {} +func (te *testEnv) Config() *TabletConfig { return te.config } +func (te *testEnv) Exporter() *servenv.Exporter { return te.exporter } +func (te *testEnv) Stats() *Stats { return te.stats } +func (te *testEnv) CollationEnv() *collations.Environment { return te.collationEnv } +func (te *testEnv) SQLParser() *sqlparser.Parser { return te.parser } func (te *testEnv) LogError() { if x := recover(); x != nil { diff --git a/go/vt/vttablet/tabletserver/tabletserver.go b/go/vt/vttablet/tabletserver/tabletserver.go index 308573eb82b..af7ba01519c 100644 --- a/go/vt/vttablet/tabletserver/tabletserver.go +++ b/go/vt/vttablet/tabletserver/tabletserver.go @@ -34,6 +34,7 @@ import ( "time" "vitess.io/vitess/go/acl" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/pools/smartconnpool" "vitess.io/vitess/go/sqltypes" @@ -128,6 +129,13 @@ type TabletServer struct { // This field is only stored for testing checkMysqlGaugeFunc *stats.GaugeFunc + + collationEnv *collations.Environment + parser *sqlparser.Parser +} + +func (tsv *TabletServer) SQLParser() *sqlparser.Parser { + return tsv.parser } var _ queryservice.QueryService = (*TabletServer)(nil) @@ -138,8 +146,8 @@ var _ queryservice.QueryService = (*TabletServer)(nil) var 
RegisterFunctions []func(Controller) // NewServer creates a new TabletServer based on the command line flags. -func NewServer(ctx context.Context, name string, topoServer *topo.Server, alias *topodatapb.TabletAlias) *TabletServer { - return NewTabletServer(ctx, name, tabletenv.NewCurrentConfig(), topoServer, alias) +func NewServer(ctx context.Context, name string, topoServer *topo.Server, alias *topodatapb.TabletAlias, collationEnv *collations.Environment, parser *sqlparser.Parser) *TabletServer { + return NewTabletServer(ctx, name, tabletenv.NewCurrentConfig(), topoServer, alias, collationEnv, parser) } var ( @@ -149,7 +157,7 @@ var ( // NewTabletServer creates an instance of TabletServer. Only the first // instance of TabletServer will expose its state variables. -func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletConfig, topoServer *topo.Server, alias *topodatapb.TabletAlias) *TabletServer { +func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletConfig, topoServer *topo.Server, alias *topodatapb.TabletAlias, collationEnv *collations.Environment, parser *sqlparser.Parser) *TabletServer { exporter := servenv.NewExporter(name, "Tablet") tsv := &TabletServer{ exporter: exporter, @@ -160,6 +168,8 @@ func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletC enableHotRowProtection: config.HotRowProtection.Mode != tabletenv.Disable, topoServer: topoServer, alias: alias.CloneVT(), + collationEnv: collationEnv, + parser: parser, } tsv.QueryTimeout.Store(config.Oltp.QueryTimeout.Nanoseconds()) @@ -172,9 +182,9 @@ func NewTabletServer(ctx context.Context, name string, config *tabletenv.TabletC return tsv.sm.Target().TabletType } - tsv.statelessql = NewQueryList("oltp-stateless") - tsv.statefulql = NewQueryList("oltp-stateful") - tsv.olapql = NewQueryList("olap") + tsv.statelessql = NewQueryList("oltp-stateless", parser) + tsv.statefulql = NewQueryList("oltp-stateful", parser) + tsv.olapql = 
NewQueryList("olap", parser) tsv.se = schema.NewEngine(tsv) tsv.hs = newHealthStreamer(tsv, alias, tsv.se) tsv.rt = repltracker.NewReplTracker(tsv, alias) @@ -336,6 +346,11 @@ func (tsv *TabletServer) Stats() *tabletenv.Stats { return tsv.stats } +// Stats satisfies tabletenv.Env. +func (tsv *TabletServer) CollationEnv() *collations.Environment { + return tsv.collationEnv +} + // LogError satisfies tabletenv.Env. func (tsv *TabletServer) LogError() { if x := recover(); x != nil { @@ -1603,13 +1618,13 @@ func (tsv *TabletServer) handlePanicAndSendLogStats( // not a concern. var messagef, logMessage, query, truncatedQuery string messagef = fmt.Sprintf("Uncaught panic for %%v:\n%v\n%s", x, tb.Stack(4) /* Skip the last 4 boiler-plate frames. */) - query = queryAsString(sql, bindVariables, tsv.TerseErrors, false) + query = queryAsString(sql, bindVariables, tsv.TerseErrors, false, tsv.SQLParser()) terr := vterrors.Errorf(vtrpcpb.Code_UNKNOWN, "%s", fmt.Sprintf(messagef, query)) if tsv.TerseErrors == tsv.Config().SanitizeLogMessages { - truncatedQuery = queryAsString(sql, bindVariables, tsv.TerseErrors, true) + truncatedQuery = queryAsString(sql, bindVariables, tsv.TerseErrors, true, tsv.SQLParser()) logMessage = fmt.Sprintf(messagef, truncatedQuery) } else { - truncatedQuery = queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true) + truncatedQuery = queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true, tsv.SQLParser()) logMessage = fmt.Sprintf(messagef, truncatedQuery) } log.Error(logMessage) @@ -1669,20 +1684,20 @@ func (tsv *TabletServer) convertAndLogError(ctx context.Context, sql string, bin sqlState := sqlErr.SQLState() errnum := sqlErr.Number() if tsv.TerseErrors && errCode != vtrpcpb.Code_FAILED_PRECONDITION { - err = vterrors.Errorf(errCode, "(errno %d) (sqlstate %s)%s: %s", errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.TerseErrors, false)) + err = vterrors.Errorf(errCode, "(errno %d) (sqlstate %s)%s: 
%s", errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.TerseErrors, false, tsv.SQLParser())) if logMethod != nil { - message = fmt.Sprintf("(errno %d) (sqlstate %s)%s: %s", errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true)) + message = fmt.Sprintf("(errno %d) (sqlstate %s)%s: %s", errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true, tsv.SQLParser())) } } else { - err = vterrors.Errorf(errCode, "%s (errno %d) (sqlstate %s)%s: %s", sqlErr.Message, errnum, sqlState, callerID, queryAsString(sql, bindVariables, false, false)) + err = vterrors.Errorf(errCode, "%s (errno %d) (sqlstate %s)%s: %s", sqlErr.Message, errnum, sqlState, callerID, queryAsString(sql, bindVariables, false, false, tsv.SQLParser())) if logMethod != nil { - message = fmt.Sprintf("%s (errno %d) (sqlstate %s)%s: %s", sqlErr.Message, errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true)) + message = fmt.Sprintf("%s (errno %d) (sqlstate %s)%s: %s", sqlErr.Message, errnum, sqlState, callerID, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true, tsv.SQLParser())) } } } else { err = vterrors.Errorf(errCode, "%v%s", err.Error(), callerID) if logMethod != nil { - message = fmt.Sprintf("%v: %v", err, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true)) + message = fmt.Sprintf("%v: %v", err, queryAsString(sql, bindVariables, tsv.Config().SanitizeLogMessages, true, tsv.SQLParser())) } } @@ -2114,7 +2129,7 @@ func (tsv *TabletServer) ConsolidatorMode() string { // If sanitize is false it also includes the bind variables. // If truncateForLog is true, it truncates the sql query and the // bind variables. 
-func queryAsString(sql string, bindVariables map[string]*querypb.BindVariable, sanitize bool, truncateForLog bool) string { +func queryAsString(sql string, bindVariables map[string]*querypb.BindVariable, sanitize bool, truncateForLog bool, parser *sqlparser.Parser) string { // Add the bind vars unless this needs to be sanitized, e.g. for log messages bvBuf := &bytes.Buffer{} fmt.Fprintf(bvBuf, "BindVars: {") @@ -2138,7 +2153,7 @@ func queryAsString(sql string, bindVariables map[string]*querypb.BindVariable, s // Truncate the bind vars if necessary bv := bvBuf.String() - maxLen := sqlparser.GetTruncateErrLen() + maxLen := parser.GetTruncateErrLen() if truncateForLog && maxLen > 0 && len(bv) > maxLen { if maxLen <= 12 { bv = sqlparser.TruncationText @@ -2149,7 +2164,7 @@ func queryAsString(sql string, bindVariables map[string]*querypb.BindVariable, s // Truncate the sql query if necessary if truncateForLog { - sql = sqlparser.TruncateForLog(sql) + sql = parser.TruncateForLog(sql) } // sql is the normalized query without the bind vars diff --git a/go/vt/vttablet/tabletserver/tabletserver_test.go b/go/vt/vttablet/tabletserver/tabletserver_test.go index d8595630480..4a275cd6253 100644 --- a/go/vt/vttablet/tabletserver/tabletserver_test.go +++ b/go/vt/vttablet/tabletserver/tabletserver_test.go @@ -30,6 +30,8 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/config" "vitess.io/vitess/go/mysql/sqlerror" "vitess.io/vitess/go/vt/dbconfigs" vttestpb "vitess.io/vitess/go/vt/proto/vttest" @@ -446,7 +448,7 @@ func TestTabletServerBeginFail(t *testing.T) { defer cancel() config := tabletenv.NewDefaultConfig() config.TxPool.Size = 1 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "", sqlparser.NewTestParser()) defer tsv.StopService() defer db.Close() @@ -954,7 +956,7 @@ func TestSerializeTransactionsSameRow(t *testing.T) { config.HotRowProtection.MaxConcurrency = 
1 // Reduce the txpool to 2 because we should never consume more than two slots. config.TxPool.Size = 2 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "", sqlparser.NewTestParser()) defer tsv.StopService() defer db.Close() @@ -1061,7 +1063,7 @@ func TestDMLQueryWithoutWhereClause(t *testing.T) { config.HotRowProtection.Mode = tabletenv.Enable config.HotRowProtection.MaxConcurrency = 1 config.TxPool.Size = 2 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "", sqlparser.NewTestParser()) defer tsv.StopService() defer db.Close() @@ -1089,7 +1091,7 @@ func TestSerializeTransactionsSameRow_ConcurrentTransactions(t *testing.T) { config.HotRowProtection.MaxConcurrency = 2 // Reduce the txpool to 2 because we should never consume more than two slots. config.TxPool.Size = 2 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "", sqlparser.NewTestParser()) defer tsv.StopService() defer db.Close() @@ -1225,7 +1227,7 @@ func TestSerializeTransactionsSameRow_TooManyPendingRequests(t *testing.T) { config.HotRowProtection.Mode = tabletenv.Enable config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxConcurrency = 1 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "", sqlparser.NewTestParser()) defer tsv.StopService() defer db.Close() @@ -1308,7 +1310,7 @@ func TestSerializeTransactionsSameRow_RequestCanceled(t *testing.T) { config := tabletenv.NewDefaultConfig() config.HotRowProtection.Mode = tabletenv.Enable config.HotRowProtection.MaxConcurrency = 1 - db, tsv := setupTabletServerTestCustom(t, ctx, config, "") + db, tsv := setupTabletServerTestCustom(t, ctx, config, "", sqlparser.NewTestParser()) defer tsv.StopService() defer db.Close() @@ -1562,7 +1564,7 @@ func TestHandleExecUnknownError(t 
*testing.T) { defer cancel() logStats := tabletenv.NewLogStats(ctx, "TestHandleExecError") config := tabletenv.NewDefaultConfig() - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) defer tsv.handlePanicAndSendLogStats("select * from test_table", nil, logStats) panic("unknown exec error") } @@ -1575,23 +1577,25 @@ func TestQueryAsString(t *testing.T) { "bv3": sqltypes.Int64BindVariable(3333333333), "bv4": sqltypes.Int64BindVariable(4444444444), } - origTruncateErrLen := sqlparser.GetTruncateErrLen() - sqlparser.SetTruncateErrLen(32) - defer sqlparser.SetTruncateErrLen(origTruncateErrLen) + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: config.DefaultMySQLVersion, + TruncateErrLen: 32, + }) + require.NoError(t, err) - query := queryAsString(longSql, longBv, true, true) + query := queryAsString(longSql, longBv, true, true, parser) want := "Sql: \"select * from test_t [TRUNCATED]\", BindVars: {[REDACTED]}" assert.Equal(t, want, query) - query = queryAsString(longSql, longBv, true, false) + query = queryAsString(longSql, longBv, true, false, parser) want = "Sql: \"select * from test_table_loooooooooooooooooooooooooooooooooooong\", BindVars: {[REDACTED]}" assert.Equal(t, want, query) - query = queryAsString(longSql, longBv, false, true) + query = queryAsString(longSql, longBv, false, true, parser) want = "Sql: \"select * from test_t [TRUNCATED]\", BindVars: {bv1: \"typ [TRUNCATED]" assert.Equal(t, want, query) - query = queryAsString(longSql, longBv, false, false) + query = queryAsString(longSql, longBv, false, false, parser) want = "Sql: \"select * from test_table_loooooooooooooooooooooooooooooooooooong\", BindVars: {bv1: \"type:INT64 value:\\\"1111111111\\\"\"bv2: \"type:INT64 value:\\\"2222222222\\\"\"bv3: \"type:INT64 
value:\\\"3333333333\\\"\"bv4: \"type:INT64 value:\\\"4444444444\\\"\"}" assert.Equal(t, want, query) } @@ -1682,7 +1686,7 @@ func TestHandleExecTabletError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() config := tabletenv.NewDefaultConfig() - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1707,7 +1711,7 @@ func TestTerseErrors(t *testing.T) { config := tabletenv.NewDefaultConfig() config.TerseErrors = true config.SanitizeLogMessages = false - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() @@ -1741,7 +1745,7 @@ func TestSanitizeLogMessages(t *testing.T) { config := tabletenv.NewDefaultConfig() config.TerseErrors = false config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() @@ -1774,7 +1778,7 @@ func TestTerseErrorsNonSQLError(t *testing.T) { defer cancel() config := tabletenv.NewDefaultConfig() config.TerseErrors = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), 
sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1799,7 +1803,7 @@ func TestSanitizeLogMessagesNonSQLError(t *testing.T) { config := tabletenv.NewDefaultConfig() config.TerseErrors = false config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1824,7 +1828,7 @@ func TestSanitizeMessagesBindVars(t *testing.T) { config := tabletenv.NewDefaultConfig() config.TerseErrors = true config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() @@ -1855,7 +1859,7 @@ func TestSanitizeMessagesNoBindVars(t *testing.T) { config := tabletenv.NewDefaultConfig() config.TerseErrors = true config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError(ctx, "", nil, vterrors.Errorf(vtrpcpb.Code_DEADLINE_EXCEEDED, "sensitive message"), nil) @@ -1873,7 +1877,7 @@ func TestTruncateErrorLen(t *testing.T) { defer cancel() config := tabletenv.NewDefaultConfig() config.TruncateErrorLen = 32 - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + tsv := 
NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError( @@ -1895,19 +1899,23 @@ func TestTruncateErrorLen(t *testing.T) { func TestTruncateMessages(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - config := tabletenv.NewDefaultConfig() - config.TerseErrors = false + cfg := tabletenv.NewDefaultConfig() + cfg.TerseErrors = false // Sanitize the log messages, which means that the bind vars are omitted - config.SanitizeLogMessages = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + cfg.SanitizeLogMessages = true + parser, err := sqlparser.New(sqlparser.Options{ + MySQLServerVersion: config.DefaultMySQLVersion, + TruncateErrLen: 52, + }) + require.NoError(t, err) + tsv := NewTabletServer(ctx, "TabletServerTest", cfg, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), parser) tl := newTestLogger() defer tl.Close() - sqlparser.SetTruncateErrLen(52) sql := "select * from test_table where xyz = :vtg1 order by abc desc" sqlErr := sqlerror.NewSQLError(10, "HY000", "sensitive message") sqlErr.Query = "select * from test_table where xyz = 'this is kinda long eh'" - err := tsv.convertAndLogError( + err = tsv.convertAndLogError( ctx, sql, map[string]*querypb.BindVariable{"vtg1": sqltypes.StringBindVariable("this is kinda long eh")}, @@ -1927,7 +1935,7 @@ func TestTruncateMessages(t *testing.T) { t.Errorf("log got '%s', want '%s'", tl.getLog(0), wantLog) } - sqlparser.SetTruncateErrLen(140) + parser.SetTruncateErrLen(140) err = tsv.convertAndLogError( ctx, sql, @@ -1947,7 +1955,6 @@ func TestTruncateMessages(t *testing.T) { if wantLog != tl.getLog(1) { t.Errorf("log got '%s', want '%s'", tl.getLog(1), wantLog) } - sqlparser.SetTruncateErrLen(0) } func 
TestTerseErrorsIgnoreFailoverInProgress(t *testing.T) { @@ -1955,7 +1962,7 @@ func TestTerseErrorsIgnoreFailoverInProgress(t *testing.T) { defer cancel() config := tabletenv.NewDefaultConfig() config.TerseErrors = true - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) tl := newTestLogger() defer tl.Close() err := tsv.convertAndLogError(ctx, "select * from test_table where id = :a", @@ -1997,7 +2004,7 @@ func TestACLHUP(t *testing.T) { defer cancel() tableacl.Register("simpleacl", &simpleacl.Factory{}) config := tabletenv.NewDefaultConfig() - tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), sqlparser.NewTestParser()) f, err := os.CreateTemp("", "tableacl") require.NoError(t, err) @@ -2507,13 +2514,13 @@ func TestDatabaseNameReplaceByKeyspaceNameReserveBeginExecuteMethod(t *testing.T func setupTabletServerTest(t testing.TB, ctx context.Context, keyspaceName string) (*fakesqldb.DB, *TabletServer) { config := tabletenv.NewDefaultConfig() - return setupTabletServerTestCustom(t, ctx, config, keyspaceName) + return setupTabletServerTestCustom(t, ctx, config, keyspaceName, sqlparser.NewTestParser()) } -func setupTabletServerTestCustom(t testing.TB, ctx context.Context, config *tabletenv.TabletConfig, keyspaceName string) (*fakesqldb.DB, *TabletServer) { +func setupTabletServerTestCustom(t testing.TB, ctx context.Context, config *tabletenv.TabletConfig, keyspaceName string, parser *sqlparser.Parser) (*fakesqldb.DB, *TabletServer) { db := setupFakeDB(t) - sidecardb.AddSchemaInitQueries(db, true) - tsv := NewTabletServer(ctx, "TabletServerTest", 
config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}) + sidecardb.AddSchemaInitQueries(db, true, parser) + tsv := NewTabletServer(ctx, "TabletServerTest", config, memorytopo.NewServer(ctx, ""), &topodatapb.TabletAlias{}, collations.MySQL8(), parser) require.Equal(t, StateNotConnected, tsv.sm.State()) dbcfgs := newDBConfigs(db) target := &querypb.Target{ @@ -2660,7 +2667,8 @@ func addTabletServerSupportedQueries(db *fakesqldb.DB) { "rollback": {}, fmt.Sprintf(sqlReadAllRedo, "_vt", "_vt"): {}, } - sidecardb.AddSchemaInitQueries(db, true) + parser := sqlparser.NewTestParser() + sidecardb.AddSchemaInitQueries(db, true, parser) for query, result := range queryResultMap { db.AddQuery(query, result) } diff --git a/go/vt/vttablet/tabletserver/testutils_test.go b/go/vt/vttablet/tabletserver/testutils_test.go index 4760558f6ec..464e84ab47f 100644 --- a/go/vt/vttablet/tabletserver/testutils_test.go +++ b/go/vt/vttablet/tabletserver/testutils_test.go @@ -30,7 +30,7 @@ import ( var errRejected = errors.New("rejected") func newDBConfigs(db *fakesqldb.DB) *dbconfigs.DBConfigs { - params, _ := db.ConnParams().MysqlParams() + params := db.ConnParams() cp := *params return dbconfigs.NewTestDBConfigs(cp, cp, "fakesqldb") } diff --git a/go/vt/vttablet/tabletserver/throttle/throttler.go b/go/vt/vttablet/tabletserver/throttle/throttler.go index 6eccb17b592..d7739b52703 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttler.go +++ b/go/vt/vttablet/tabletserver/throttle/throttler.go @@ -355,7 +355,6 @@ func (throttler *Throttler) normalizeThrottlerConfig(throttlerConfig *topodatapb } func (throttler *Throttler) WatchSrvKeyspaceCallback(srvks *topodatapb.SrvKeyspace, err error) bool { - log.Infof("Throttler: WatchSrvKeyspaceCallback called with: %+v", srvks) if err != nil { log.Errorf("WatchSrvKeyspaceCallback error: %v", err) return false @@ -366,7 +365,6 @@ func (throttler *Throttler) WatchSrvKeyspaceCallback(srvks *topodatapb.SrvKeyspa // Throttler is enabled and we 
should apply the config change // through Operate() or else we get into race conditions. go func() { - log.Infof("Throttler: submitting a throttler config apply message with: %+v", throttlerConfig) throttler.throttlerConfigChan <- throttlerConfig }() } else { diff --git a/go/vt/vttablet/tabletserver/throttle/throttler_test.go b/go/vt/vttablet/tabletserver/throttle/throttler_test.go index b40f3a0a9cb..f0d895e1413 100644 --- a/go/vt/vttablet/tabletserver/throttle/throttler_test.go +++ b/go/vt/vttablet/tabletserver/throttle/throttler_test.go @@ -28,6 +28,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/tabletserver/connpool" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -108,7 +110,7 @@ func newTestThrottler() *Throttler { s.ThrottleThreshold = &atomic.Uint64{} s.ThrottleThreshold.Store(1) } - env := tabletenv.NewEnv(nil, "TabletServerTest") + env := tabletenv.NewEnv(nil, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser()) throttler := &Throttler{ mysqlClusterProbesChan: make(chan *mysql.ClusterProbes), mysqlClusterThresholds: cache.New(cache.NoExpiration, 0), diff --git a/go/vt/vttablet/tabletserver/tx/api.go b/go/vt/vttablet/tabletserver/tx/api.go index a06923776c0..a392e530ffa 100644 --- a/go/vt/vttablet/tabletserver/tx/api.go +++ b/go/vt/vttablet/tabletserver/tx/api.go @@ -126,7 +126,7 @@ func (p *Properties) RecordQuery(query string) { func (p *Properties) InTransaction() bool { return p != nil } // String returns a printable version of the transaction -func (p *Properties) String(sanitize bool) string { +func (p *Properties) String(sanitize bool, parser *sqlparser.Parser) string { if p == nil { return "" } @@ -135,7 +135,7 @@ func (p *Properties) String(sanitize bool) string { sb := strings.Builder{} for _, query := range p.Queries { if sanitize { - 
query, _ = sqlparser.RedactSQLQuery(query) + query, _ = parser.RedactSQLQuery(query) } sb.WriteString(query) sb.WriteString(";") diff --git a/go/vt/vttablet/tabletserver/tx_engine_test.go b/go/vt/vttablet/tabletserver/tx_engine_test.go index 8b190d675f8..d2ec33ef969 100644 --- a/go/vt/vttablet/tabletserver/tx_engine_test.go +++ b/go/vt/vttablet/tabletserver/tx_engine_test.go @@ -25,6 +25,8 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" "github.com/stretchr/testify/assert" @@ -48,7 +50,7 @@ func TestTxEngineClose(t *testing.T) { config.TxPool.Size = 10 config.Oltp.TxTimeout = 100 * time.Millisecond config.GracePeriods.Shutdown = 0 - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest")) + te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) // Normal close. te.AcceptReadWrite() @@ -151,7 +153,7 @@ func TestTxEngineBegin(t *testing.T) { db.AddQueryPattern(".*", &sqltypes.Result{}) config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest")) + te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) for _, exec := range []func() (int64, string, error){ func() (int64, string, error) { @@ -197,7 +199,7 @@ func TestTxEngineRenewFails(t *testing.T) { db.AddQueryPattern(".*", &sqltypes.Result{}) config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest")) + te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) te.AcceptReadOnly() options := &querypb.ExecuteOptions{} connID, _, err := te.ReserveBegin(ctx, options, nil, nil) @@ -535,7 +537,7 @@ func setupTxEngine(db *fakesqldb.DB) *TxEngine { config.TxPool.Size = 10 config.Oltp.TxTimeout = 100 * 
time.Millisecond config.GracePeriods.Shutdown = 0 - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest")) + te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) return te } @@ -567,7 +569,7 @@ func TestTxEngineFailReserve(t *testing.T) { db.AddQueryPattern(".*", &sqltypes.Result{}) config := tabletenv.NewDefaultConfig() config.DB = newDBConfigs(db) - te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest")) + te := NewTxEngine(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) options := &querypb.ExecuteOptions{} _, err := te.Reserve(ctx, options, 0, nil) diff --git a/go/vt/vttablet/tabletserver/tx_pool.go b/go/vt/vttablet/tabletserver/tx_pool.go index f42e3c95408..f01d12b655c 100644 --- a/go/vt/vttablet/tabletserver/tx_pool.go +++ b/go/vt/vttablet/tabletserver/tx_pool.go @@ -130,7 +130,7 @@ func (tp *TxPool) Shutdown(ctx context.Context) { func (tp *TxPool) transactionKiller() { defer tp.env.LogError() for _, conn := range tp.scp.GetElapsedTimeout(vterrors.TxKillerRollback) { - log.Warningf("killing transaction (exceeded timeout: %v): %s", conn.timeout, conn.String(tp.env.Config().SanitizeLogMessages)) + log.Warningf("killing transaction (exceeded timeout: %v): %s", conn.timeout, conn.String(tp.env.Config().SanitizeLogMessages, tp.env.SQLParser())) switch { case conn.IsTainted(): conn.Close() diff --git a/go/vt/vttablet/tabletserver/tx_pool_test.go b/go/vt/vttablet/tabletserver/tx_pool_test.go index 71bba1e128d..eae3153d874 100644 --- a/go/vt/vttablet/tabletserver/tx_pool_test.go +++ b/go/vt/vttablet/tabletserver/tx_pool_test.go @@ -23,8 +23,11 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/callerid" + "vitess.io/vitess/go/vt/dbconfigs" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tx" @@ 
-215,7 +218,8 @@ func primeTxPoolWithConnection(t *testing.T, ctx context.Context) (*fakesqldb.DB txPool, _ := newTxPool() // Set the capacity to 1 to ensure that the db connection is reused. txPool.scp.conns.SetCapacity(1) - txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + txPool.Open(params, params, params) // Run a query to trigger a database connection. That connection will be // reused by subsequent transactions. @@ -374,7 +378,8 @@ func TestTxPoolGetConnRecentlyRemovedTransaction(t *testing.T) { assertErrorMatch(id, "pool closed") txPool, _ = newTxPool() - txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + txPool.Open(params, params, params) conn1, _, _, _ = txPool.Begin(ctx, &querypb.ExecuteOptions{}, false, 0, nil, nil) id = conn1.ReservedID() @@ -389,7 +394,7 @@ func TestTxPoolGetConnRecentlyRemovedTransaction(t *testing.T) { env.Config().SetTxTimeoutForWorkload(1*time.Millisecond, querypb.ExecuteOptions_OLTP) env.Config().SetTxTimeoutForWorkload(1*time.Millisecond, querypb.ExecuteOptions_OLAP) txPool, _ = newTxPoolWithEnv(env) - txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + txPool.Open(params, params, params) defer txPool.Close() conn1, _, _, err = txPool.Begin(ctx, &querypb.ExecuteOptions{}, false, 0, nil, nil) @@ -820,7 +825,7 @@ func newEnv(exporterName string) tabletenv.Env { config.OltpReadPool.IdleTimeout = 30 * time.Second config.OlapReadPool.IdleTimeout = 30 * time.Second config.TxPool.IdleTimeout = 30 * time.Second - env := tabletenv.NewEnv(config, exporterName) + env := tabletenv.NewEnv(config, exporterName, collations.MySQL8(), sqlparser.NewTestParser()) return env } @@ -869,7 +874,8 @@ func setup(t *testing.T) (*fakesqldb.DB, *TxPool, *fakeLimiter, func()) { db.AddQueryPattern(".*", &sqltypes.Result{}) txPool, limiter := newTxPool() - txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := 
dbconfigs.New(db.ConnParams()) + txPool.Open(params, params, params) return db, txPool, limiter, func() { txPool.Close() @@ -882,7 +888,8 @@ func setupWithEnv(t *testing.T, env tabletenv.Env) (*fakesqldb.DB, *TxPool, *fak db.AddQueryPattern(".*", &sqltypes.Result{}) txPool, limiter := newTxPoolWithEnv(env) - txPool.Open(db.ConnParams(), db.ConnParams(), db.ConnParams()) + params := dbconfigs.New(db.ConnParams()) + txPool.Open(params, params, params) return db, txPool, limiter, func() { txPool.Close() diff --git a/go/vt/vttablet/tabletserver/txlimiter/tx_limiter_test.go b/go/vt/vttablet/tabletserver/txlimiter/tx_limiter_test.go index f41f4a9089c..ec9d9e1203a 100644 --- a/go/vt/vttablet/tabletserver/txlimiter/tx_limiter_test.go +++ b/go/vt/vttablet/tabletserver/txlimiter/tx_limiter_test.go @@ -19,7 +19,9 @@ package txlimiter import ( "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/callerid" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" querypb "vitess.io/vitess/go/vt/proto/query" @@ -47,7 +49,7 @@ func TestTxLimiter_DisabledAllowsAll(t *testing.T) { config.TransactionLimitByPrincipal = false config.TransactionLimitByComponent = false config.TransactionLimitBySubcomponent = false - limiter := New(tabletenv.NewEnv(config, "TabletServerTest")) + limiter := New(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) im, ef := createCallers("", "", "", "") for i := 0; i < 5; i++ { if got, want := limiter.Get(im, ef), true; got != want { @@ -69,7 +71,7 @@ func TestTxLimiter_LimitsOnlyOffendingUser(t *testing.T) { config.TransactionLimitBySubcomponent = false // This should allow 3 slots to all users - newlimiter := New(tabletenv.NewEnv(config, "TabletServerTest")) + newlimiter := New(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) limiter, ok := newlimiter.(*Impl) if !ok { t.Fatalf("New returned limiter of unexpected 
type: got %T, want %T", newlimiter, limiter) @@ -135,7 +137,7 @@ func TestTxLimiterDryRun(t *testing.T) { config.TransactionLimitBySubcomponent = false // This should allow 3 slots to all users - newlimiter := New(tabletenv.NewEnv(config, "TabletServerTest")) + newlimiter := New(tabletenv.NewEnv(config, "TabletServerTest", collations.MySQL8(), sqlparser.NewTestParser())) limiter, ok := newlimiter.(*Impl) if !ok { t.Fatalf("New returned limiter of unexpected type: got %T, want %T", newlimiter, limiter) diff --git a/go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go b/go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go index d495800e141..a2af61da963 100644 --- a/go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go +++ b/go/vt/vttablet/tabletserver/txserializer/tx_serializer_test.go @@ -17,6 +17,7 @@ limitations under the License. package txserializer import ( + "context" "fmt" "net/http" "net/http/httptest" @@ -25,9 +26,9 @@ import ( "testing" "time" - "context" - + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/streamlog" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" @@ -48,7 +49,7 @@ func TestTxSerializer_NoHotRow(t *testing.T) { config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxGlobalQueueSize = 1 config.HotRowProtection.MaxConcurrency = 5 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) resetVariables(txs) done, waited, err := txs.Wait(context.Background(), "t1 where1", "t1") @@ -80,7 +81,7 @@ func TestTxSerializerRedactDebugUI(t *testing.T) { config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxGlobalQueueSize = 1 config.HotRowProtection.MaxConcurrency = 5 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", 
collations.MySQL8(), sqlparser.NewTestParser())) resetVariables(txs) done, waited, err := txs.Wait(context.Background(), "t1 where1", "t1") @@ -104,7 +105,7 @@ func TestTxSerializerRedactDebugUI(t *testing.T) { func TestKeySanitization(t *testing.T) { config := tabletenv.NewDefaultConfig() - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) // with a where clause key := "t1 where c1='foo'" want := "t1 ... [REDACTED]" @@ -126,7 +127,7 @@ func TestTxSerializer(t *testing.T) { config.HotRowProtection.MaxQueueSize = 2 config.HotRowProtection.MaxGlobalQueueSize = 3 config.HotRowProtection.MaxConcurrency = 1 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) resetVariables(txs) // tx1. @@ -199,7 +200,7 @@ func TestTxSerializer_ConcurrentTransactions(t *testing.T) { config.HotRowProtection.MaxQueueSize = 3 config.HotRowProtection.MaxGlobalQueueSize = 3 config.HotRowProtection.MaxConcurrency = 2 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) resetVariables(txs) // tx1. @@ -322,7 +323,7 @@ func TestTxSerializerCancel(t *testing.T) { config.HotRowProtection.MaxQueueSize = 4 config.HotRowProtection.MaxGlobalQueueSize = 4 config.HotRowProtection.MaxConcurrency = 2 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) resetVariables(txs) // tx3 and tx4 will record their number once they're done waiting. 
@@ -423,7 +424,7 @@ func TestTxSerializerDryRun(t *testing.T) { config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxGlobalQueueSize = 2 config.HotRowProtection.MaxConcurrency = 1 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) resetVariables(txs) // tx1. @@ -493,7 +494,7 @@ func TestTxSerializerGlobalQueueOverflow(t *testing.T) { config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxGlobalQueueSize = 1 config.HotRowProtection.MaxConcurrency = 1 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) // tx1. done1, waited1, err1 := txs.Wait(context.Background(), "t1 where1", "t1") @@ -534,7 +535,7 @@ func TestTxSerializerPending(t *testing.T) { config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxGlobalQueueSize = 1 config.HotRowProtection.MaxConcurrency = 1 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) if got, want := txs.Pending("t1 where1"), 0; got != want { t.Errorf("there should be no pending transaction: got = %v, want = %v", got, want) } @@ -545,7 +546,7 @@ func BenchmarkTxSerializer_NoHotRow(b *testing.B) { config.HotRowProtection.MaxQueueSize = 1 config.HotRowProtection.MaxGlobalQueueSize = 1 config.HotRowProtection.MaxConcurrency = 5 - txs := New(tabletenv.NewEnv(config, "TxSerializerTest")) + txs := New(tabletenv.NewEnv(config, "TxSerializerTest", collations.MySQL8(), sqlparser.NewTestParser())) b.ResetTimer() diff --git a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go index 64c35f6ea95..3463e1ef65e 100644 --- a/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go +++ 
b/go/vt/vttablet/tabletserver/txthrottler/tx_throttler_test.go @@ -28,7 +28,9 @@ import ( "github.com/stretchr/testify/assert" "go.uber.org/mock/gomock" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/throttler" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -42,7 +44,7 @@ import ( func TestDisabledThrottler(t *testing.T) { config := tabletenv.NewDefaultConfig() config.EnableTxThrottler = false - env := tabletenv.NewEnv(config, t.Name()) + env := tabletenv.NewEnv(config, t.Name(), collations.MySQL8(), sqlparser.NewTestParser()) throttler := NewTxThrottler(env, nil) throttler.InitDBConfig(&querypb.Target{ Keyspace: "keyspace", @@ -106,7 +108,7 @@ func TestEnabledThrottler(t *testing.T) { config.EnableTxThrottler = true config.TxThrottlerTabletTypes = &topoproto.TabletTypeListFlag{topodatapb.TabletType_REPLICA} - env := tabletenv.NewEnv(config, t.Name()) + env := tabletenv.NewEnv(config, t.Name(), collations.MySQL8(), sqlparser.NewTestParser()) throttler := NewTxThrottler(env, ts) throttlerImpl, _ := throttler.(*txThrottler) assert.NotNil(t, throttlerImpl) @@ -169,7 +171,7 @@ func TestFetchKnownCells(t *testing.T) { func TestDryRunThrottler(t *testing.T) { config := tabletenv.NewDefaultConfig() - env := tabletenv.NewEnv(config, t.Name()) + env := tabletenv.NewEnv(config, t.Name(), collations.MySQL8(), sqlparser.NewTestParser()) testCases := []struct { Name string diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine.go b/go/vt/vttablet/tabletserver/vstreamer/engine.go index 2862601bf1b..977d0453513 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/engine.go +++ b/go/vt/vttablet/tabletserver/vstreamer/engine.go @@ -432,7 +432,7 @@ func (vse *Engine) setWatch() { } var vschema *vindexes.VSchema if v != nil { - vschema = vindexes.BuildVSchema(v) + vschema = vindexes.BuildVSchema(v, vse.env.SQLParser()) if err != nil { log.Errorf("Error building 
vschema: %v", err) vse.vschemaErrors.Add(1) @@ -590,9 +590,13 @@ func (vse *Engine) getMySQLEndpoint(ctx context.Context, db dbconfigs.Connector) // mapPKEquivalentCols gets a PK equivalent from mysqld for the table // and maps the column names to field indexes in the MinimalTable struct. -func (vse *Engine) mapPKEquivalentCols(ctx context.Context, table *binlogdatapb.MinimalTable) ([]int, error) { - mysqld := mysqlctl.NewMysqld(vse.env.Config().DB) - pkeColNames, indexName, err := mysqld.GetPrimaryKeyEquivalentColumns(ctx, vse.env.Config().DB.DBName, table.Name) +func (vse *Engine) mapPKEquivalentCols(ctx context.Context, db dbconfigs.Connector, table *binlogdatapb.MinimalTable) ([]int, error) { + conn, err := db.Connect(ctx) + if err != nil { + return nil, err + } + defer conn.Close() + pkeColNames, indexName, err := mysqlctl.GetPrimaryKeyEquivalentColumns(ctx, conn.ExecuteFetch, vse.env.Config().DB.DBName, table.Name) if err != nil { return nil, err } diff --git a/go/vt/vttablet/tabletserver/vstreamer/engine_test.go b/go/vt/vttablet/tabletserver/vstreamer/engine_test.go index 36bcc8f181a..35bea172cd0 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/engine_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/engine_test.go @@ -243,7 +243,7 @@ func TestVStreamerWaitForMySQL(t *testing.T) { testDB.AddQuery(replicaLagQuery, sbmres) for _, tt := range tests { - tt.fields.cp = testDB.ConnParams() + tt.fields.cp = dbconfigs.New(testDB.ConnParams()) ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() t.Run(tt.name, func(t *testing.T) { diff --git a/go/vt/vttablet/tabletserver/vstreamer/fuzz.go b/go/vt/vttablet/tabletserver/vstreamer/fuzz.go index 90387e97f2c..83369f27d5e 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/fuzz.go +++ b/go/vt/vttablet/tabletserver/vstreamer/fuzz.go @@ -24,6 +24,7 @@ import ( binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + 
"vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -65,7 +66,7 @@ func Fuzz(data []byte) int { if err != nil { return -1 } - _, _ = buildPlan(t1, testLocalVSchema, &binlogdatapb.Filter{ + _, _ = buildPlan(t1, testLocalVSchema, sqlparser.NewTestParser(), &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{ {Match: str1, Filter: str2}, }, diff --git a/go/vt/vttablet/tabletserver/vstreamer/local_vschema_test.go b/go/vt/vttablet/tabletserver/vstreamer/local_vschema_test.go index f514298e844..5d57effbadf 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/local_vschema_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/local_vschema_test.go @@ -23,6 +23,7 @@ import ( "github.com/stretchr/testify/assert" vschemapb "vitess.io/vitess/go/vt/proto/vschema" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" ) @@ -86,7 +87,7 @@ func TestFindColVindex(t *testing.T) { }, }, } - vschema := vindexes.BuildVSchema(testSrvVSchema) + vschema := vindexes.BuildVSchema(testSrvVSchema, sqlparser.NewTestParser()) testcases := []struct { keyspace string @@ -149,7 +150,7 @@ func TestFindOrCreateVindex(t *testing.T) { }, }, } - vschema := vindexes.BuildVSchema(testSrvVSchema) + vschema := vindexes.BuildVSchema(testSrvVSchema, sqlparser.NewTestParser()) lvs := &localVSchema{ keyspace: "ks1", @@ -204,7 +205,7 @@ func TestFindTable(t *testing.T) { }, }, } - vschema := vindexes.BuildVSchema(testSrvVSchema) + vschema := vindexes.BuildVSchema(testSrvVSchema, sqlparser.NewTestParser()) testcases := []struct { keyspace string diff --git a/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go b/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go index f3743c6de46..af41f900d25 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/main_flaky_test.go @@ -26,7 +26,9 @@ import ( _flag "vitess.io/vitess/go/internal/flag" "vitess.io/vitess/go/mysql" + 
"vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" "vitess.io/vitess/go/vt/vttablet/tabletserver/vstreamer/testenv" ) @@ -93,7 +95,7 @@ func customEngine(t *testing.T, modifier func(mysql.ConnParams) mysql.ConnParams config := env.TabletEnv.Config().Clone() config.DB = dbconfigs.NewTestDBConfigs(modified, modified, modified.DbName) - engine := NewEngine(tabletenv.NewEnv(config, "VStreamerTest"), env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) + engine := NewEngine(tabletenv.NewEnv(config, "VStreamerTest", collations.MySQL8(), sqlparser.NewTestParser()), env.SrvTopo, env.SchemaEngine, nil, env.Cells[0]) engine.InitDBConfig(env.KeyspaceName, env.ShardName) engine.Open() return engine diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go index fc9408d050e..e89276a5c98 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder.go @@ -56,6 +56,8 @@ type Plan struct { // Filters is the list of filters to be applied to the columns // of the table. 
Filters []Filter + + collationEnv *collations.Environment } // Opcode enumerates the operators supported in a where clause @@ -162,14 +164,14 @@ func getOpcode(comparison *sqlparser.ComparisonExpr) (Opcode, error) { } // compare returns true after applying the comparison specified in the Filter to the actual data in the column -func compare(comparison Opcode, columnValue, filterValue sqltypes.Value, charset collations.ID) (bool, error) { +func compare(comparison Opcode, columnValue, filterValue sqltypes.Value, collationEnv *collations.Environment, charset collations.ID) (bool, error) { // use null semantics: return false if either value is null if columnValue.IsNull() || filterValue.IsNull() { return false, nil } // at this point neither values can be null // NullsafeCompare returns 0 if values match, -1 if columnValue < filterValue, 1 if columnValue > filterValue - result, err := evalengine.NullsafeCompare(columnValue, filterValue, charset) + result, err := evalengine.NullsafeCompare(columnValue, filterValue, collationEnv, charset) if err != nil { return false, err } @@ -228,7 +230,7 @@ func (plan *Plan) filter(values, result []sqltypes.Value, charsets []collations. return false, nil } default: - match, err := compare(filter.Opcode, values[filter.ColNum], filter.Value, charsets[filter.ColNum]) + match, err := compare(filter.Opcode, values[filter.ColNum], filter.Value, plan.collationEnv, charsets[filter.ColNum]) if err != nil { return false, err } @@ -284,11 +286,11 @@ func mustSendStmt(query mysql.Query, dbname string) bool { return true } -func mustSendDDL(query mysql.Query, dbname string, filter *binlogdatapb.Filter) bool { +func mustSendDDL(query mysql.Query, dbname string, filter *binlogdatapb.Filter, parser *sqlparser.Parser) bool { if query.Database != "" && query.Database != dbname { return false } - ast, err := sqlparser.Parse(query.SQL) + ast, err := parser.Parse(query.SQL) // If there was a parsing error, we send it through. 
Hopefully, // recipient can handle it. if err != nil { @@ -344,7 +346,7 @@ func tableMatches(table sqlparser.TableName, dbname string, filter *binlogdatapb return ruleMatches(table.Name.String(), filter) } -func buildPlan(ti *Table, vschema *localVSchema, filter *binlogdatapb.Filter) (*Plan, error) { +func buildPlan(ti *Table, vschema *localVSchema, filter *binlogdatapb.Filter, collationEnv *collations.Environment, parser *sqlparser.Parser) (*Plan, error) { for _, rule := range filter.Rules { switch { case strings.HasPrefix(rule.Match, "/"): @@ -356,9 +358,9 @@ func buildPlan(ti *Table, vschema *localVSchema, filter *binlogdatapb.Filter) (* if !result { continue } - return buildREPlan(ti, vschema, rule.Filter) + return buildREPlan(ti, vschema, rule.Filter, collationEnv) case rule.Match == ti.Name: - return buildTablePlan(ti, vschema, rule.Filter) + return buildTablePlan(ti, vschema, rule.Filter, collationEnv, parser) } } return nil, nil @@ -366,9 +368,10 @@ func buildPlan(ti *Table, vschema *localVSchema, filter *binlogdatapb.Filter) (* // buildREPlan handles cases where Match has a regular expression. // If so, the Filter can be an empty string or a keyrange, like "-80". -func buildREPlan(ti *Table, vschema *localVSchema, filter string) (*Plan, error) { +func buildREPlan(ti *Table, vschema *localVSchema, filter string, collationEnv *collations.Environment) (*Plan, error) { plan := &Plan{ - Table: ti, + Table: ti, + collationEnv: collationEnv, } plan.ColExprs = make([]ColExpr, len(ti.Fields)) for i, col := range ti.Fields { @@ -409,8 +412,8 @@ func buildREPlan(ti *Table, vschema *localVSchema, filter string) (*Plan, error) // BuildTablePlan handles cases where a specific table name is specified. // The filter must be a select statement. 
-func buildTablePlan(ti *Table, vschema *localVSchema, query string) (*Plan, error) { - sel, fromTable, err := analyzeSelect(query) +func buildTablePlan(ti *Table, vschema *localVSchema, query string, collationEnv *collations.Environment, parser *sqlparser.Parser) (*Plan, error) { + sel, fromTable, err := analyzeSelect(query, parser) if err != nil { log.Errorf("%s", err.Error()) return nil, err @@ -421,7 +424,8 @@ func buildTablePlan(ti *Table, vschema *localVSchema, query string) (*Plan, erro } plan := &Plan{ - Table: ti, + Table: ti, + collationEnv: collationEnv, } if err := plan.analyzeWhere(vschema, sel.Where); err != nil { log.Errorf("%s", err.Error()) @@ -439,8 +443,8 @@ func buildTablePlan(ti *Table, vschema *localVSchema, query string) (*Plan, erro return plan, nil } -func analyzeSelect(query string) (sel *sqlparser.Select, fromTable sqlparser.IdentifierCS, err error) { - statement, err := sqlparser.Parse(query) +func analyzeSelect(query string, parser *sqlparser.Parser) (sel *sqlparser.Select, fromTable sqlparser.IdentifierCS, err error) { + statement, err := parser.Parse(query) if err != nil { return nil, fromTable, err } @@ -532,11 +536,14 @@ func (plan *Plan) analyzeWhere(vschema *localVSchema, where *sqlparser.Where) er if val.Type != sqlparser.IntVal && val.Type != sqlparser.StrVal { return fmt.Errorf("unexpected: %v", sqlparser.String(expr)) } - pv, err := evalengine.Translate(val, nil) + pv, err := evalengine.Translate(val, &evalengine.Config{ + Collation: plan.collationEnv.DefaultConnectionCharset(), + CollationEnv: plan.collationEnv, + }) if err != nil { return err } - env := evalengine.EmptyExpressionEnv() + env := evalengine.EmptyExpressionEnv(plan.collationEnv) resolved, err := env.Evaluate(pv) if err != nil { return err @@ -544,7 +551,7 @@ func (plan *Plan) analyzeWhere(vschema *localVSchema, where *sqlparser.Where) er plan.Filters = append(plan.Filters, Filter{ Opcode: opcode, ColNum: colnum, - Value: resolved.Value(collations.Default()), + 
Value: resolved.Value(plan.collationEnv.DefaultConnectionCharset()), }) case *sqlparser.FuncExpr: if !expr.Name.EqualString("in_keyrange") { diff --git a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go index 03001362073..b6b62098060 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/planbuilder_test.go @@ -20,18 +20,16 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/mysql/collations" - "vitess.io/vitess/go/vt/proto/topodata" - - "github.com/stretchr/testify/require" - - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/json2" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vtgate/vindexes" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -86,7 +84,7 @@ func init() { "ks": &kspb, }, } - vschema := vindexes.BuildVSchema(srvVSchema) + vschema := vindexes.BuildVSchema(srvVSchema, sqlparser.NewTestParser()) testLocalVSchema = &localVSchema{ keyspace: "ks", vschema: vschema, @@ -167,7 +165,7 @@ func TestMustSendDDL(t *testing.T) { }} for _, tcase := range testcases { q := mysql.Query{SQL: tcase.sql, Database: tcase.db} - got := mustSendDDL(q, "mydb", filter) + got := mustSendDDL(q, "mydb", filter, sqlparser.NewTestParser()) if got != tcase.output { t.Errorf("%v: %v, want %v", q, got, tcase.output) } @@ -259,6 +257,7 @@ func TestPlanBuilder(t *testing.T) { Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), }, }}, + collationEnv: collations.MySQL8(), }, }, { inTable: t1, @@ -289,6 +288,7 @@ func TestPlanBuilder(t *testing.T) { VindexColumns: []int{0}, KeyRange: nil, }}, + collationEnv: collations.MySQL8(), }, }, { inTable: t1, @@ -311,6 +311,7 @@ func 
TestPlanBuilder(t *testing.T) { Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), }, }}, + collationEnv: collations.MySQL8(), }, }, { inTable: t1, @@ -333,6 +334,7 @@ func TestPlanBuilder(t *testing.T) { Flags: uint32(querypb.MySqlFlag_BINARY_FLAG), }, }}, + collationEnv: collations.MySQL8(), }, }, { inTable: t1, @@ -355,6 +357,7 @@ func TestPlanBuilder(t *testing.T) { Flags: uint32(querypb.MySqlFlag_NUM_FLAG), }, }}, + collationEnv: collations.MySQL8(), }, }, { inTable: t1, @@ -385,6 +388,7 @@ func TestPlanBuilder(t *testing.T) { VindexColumns: []int{0}, KeyRange: nil, }}, + collationEnv: collations.MySQL8(), }, }, { inTable: t1, @@ -415,6 +419,7 @@ func TestPlanBuilder(t *testing.T) { VindexColumns: []int{0}, KeyRange: nil, }}, + collationEnv: collations.MySQL8(), }, }, { inTable: t1, @@ -445,6 +450,7 @@ func TestPlanBuilder(t *testing.T) { VindexColumns: nil, KeyRange: nil, }}, + collationEnv: collations.MySQL8(), }, }, { inTable: t2, @@ -478,6 +484,7 @@ func TestPlanBuilder(t *testing.T) { VindexColumns: []int{0, 1}, KeyRange: nil, }}, + collationEnv: collations.MySQL8(), }, }, { inTable: t1, @@ -501,6 +508,7 @@ func TestPlanBuilder(t *testing.T) { }, }}, convertUsingUTF8Columns: map[string]bool{"val": true}, + collationEnv: collations.MySQL8(), }, }, { inTable: regional, @@ -524,6 +532,7 @@ func TestPlanBuilder(t *testing.T) { Vindex: testLocalVSchema.vschema.Keyspaces["ks"].Vindexes["region_vdx"], VindexColumns: []int{0, 1}, }}, + collationEnv: collations.MySQL8(), }, }, { inTable: t1, @@ -636,7 +645,7 @@ func TestPlanBuilder(t *testing.T) { t.Run(tcase.inRule.String(), func(t *testing.T) { plan, err := buildPlan(tcase.inTable, testLocalVSchema, &binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{tcase.inRule}, - }) + }, collations.MySQL8(), sqlparser.NewTestParser()) if tcase.outErr != "" { assert.Nil(t, plan) @@ -733,7 +742,7 @@ func TestPlanBuilderFilterComparison(t *testing.T) { t.Run(tcase.name, func(t *testing.T) { plan, err := buildPlan(t1, testLocalVSchema, 
&binlogdatapb.Filter{ Rules: []*binlogdatapb.Rule{{Match: "t1", Filter: tcase.inFilter}}, - }) + }, collations.MySQL8(), sqlparser.NewTestParser()) if tcase.outErr != "" { assert.Nil(t, plan) @@ -775,7 +784,7 @@ func TestCompare(t *testing.T) { } for _, tc := range testcases { t.Run("", func(t *testing.T) { - got, err := compare(tc.opcode, tc.columnValue, tc.filterValue, collations.CollationUtf8mb4ID) + got, err := compare(tc.opcode, tc.columnValue, tc.filterValue, collations.MySQL8(), collations.CollationUtf8mb4ID) require.NoError(t, err) require.Equal(t, tc.want, got) }) diff --git a/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go index 91f319fa2c5..88084d62a50 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/resultstreamer.go @@ -62,7 +62,7 @@ func (rs *resultStreamer) Cancel() { } func (rs *resultStreamer) Stream() error { - _, fromTable, err := analyzeSelect(rs.query) + _, fromTable, err := analyzeSelect(rs.query, rs.vse.env.SQLParser()) if err != nil { return err } diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go index 99ebbbdfaa5..b287aa08287 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer.go @@ -35,7 +35,6 @@ import ( vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/vterrors" - "vitess.io/vitess/go/vt/vtgate/vindexes" "vitess.io/vitess/go/vt/vttablet" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" @@ -45,17 +44,6 @@ var ( rowStreamertHeartbeatInterval = 10 * time.Second ) -// RowStreamer exposes an externally usable interface to rowStreamer. 
-type RowStreamer interface { - Stream() error - Cancel() -} - -// NewRowStreamer returns a RowStreamer -func NewRowStreamer(ctx context.Context, cp dbconfigs.Connector, se *schema.Engine, query string, lastpk []sqltypes.Value, send func(*binlogdatapb.VStreamRowsResponse) error, vse *Engine, mode RowStreamerMode) RowStreamer { - return newRowStreamer(ctx, cp, se, query, lastpk, &localVSchema{vschema: &vindexes.VSchema{}}, send, vse, mode, nil) -} - type RowStreamerMode int32 const ( @@ -151,7 +139,7 @@ func (rs *rowStreamer) Stream() error { func (rs *rowStreamer) buildPlan() error { // This pre-parsing is required to extract the table name // and create its metadata. - sel, fromTable, err := analyzeSelect(rs.query) + sel, fromTable, err := analyzeSelect(rs.query, rs.se.SQLParser()) if err != nil { return err } @@ -188,7 +176,7 @@ func (rs *rowStreamer) buildPlan() error { // This is because the row format of a read is identical // to the row format of a binlog event. So, the same // filtering will work. - rs.plan, err = buildTablePlan(ti, rs.vschema, rs.query) + rs.plan, err = buildTablePlan(ti, rs.vschema, rs.query, rs.se.CollationEnv(), rs.se.SQLParser()) if err != nil { log.Errorf("%s", err.Error()) return err @@ -235,7 +223,7 @@ func (rs *rowStreamer) buildPKColumns(st *binlogdatapb.MinimalTable) ([]int, err var pkColumns = make([]int, 0) if len(st.PKColumns) == 0 { // Use a PK equivalent if one exists. 
- pkColumns, err := rs.vse.mapPKEquivalentCols(rs.ctx, st) + pkColumns, err := rs.vse.mapPKEquivalentCols(rs.ctx, rs.cp, st) if err == nil && len(pkColumns) != 0 { return pkColumns, nil } diff --git a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go index 9828481397b..7322e781cc8 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go +++ b/go/vt/vttablet/tabletserver/vstreamer/rowstreamer_test.go @@ -23,12 +23,12 @@ import ( "testing" "time" - "vitess.io/vitess/go/vt/log" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/log" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" ) @@ -206,7 +206,7 @@ func TestStreamRowsUnicode(t *testing.T) { engine = savedEngine }() engine = customEngine(t, func(in mysql.ConnParams) mysql.ConnParams { - in.Charset = "latin1" + in.Charset = collations.CollationLatin1Swedish return in }) defer engine.Close() diff --git a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go index a05dc3b2c05..4a793407008 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go +++ b/go/vt/vttablet/tabletserver/vstreamer/testenv/testenv.go @@ -26,8 +26,10 @@ import ( "strings" "vitess.io/vitess/go/json2" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/srvtopo" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" @@ -108,7 +110,7 @@ func Init(ctx context.Context) (*Env, error) { te.Dbcfgs = dbconfigs.NewTestDBConfigs(te.cluster.MySQLConnParams(), te.cluster.MySQLAppDebugConnParams(), te.cluster.DbName()) config := tabletenv.NewDefaultConfig() config.DB = te.Dbcfgs - te.TabletEnv = tabletenv.NewEnv(config, "VStreamerTest") + te.TabletEnv = 
tabletenv.NewEnv(config, "VStreamerTest", collations.MySQL8(), sqlparser.NewTestParser()) te.Mysqld = mysqlctl.NewMysqld(te.Dbcfgs) pos, _ := te.Mysqld.PrimaryPosition() if strings.HasPrefix(strings.ToLower(pos.GTIDSet.Flavor()), string(mysqlctl.FlavorMariaDB)) { diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go index d8a364d1aef..2ea26c3632d 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go @@ -503,7 +503,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e Type: binlogdatapb.VEventType_COMMIT, }) case sqlparser.StmtDDL: - if mustSendDDL(q, vs.cp.DBName(), vs.filter) { + if mustSendDDL(q, vs.cp.DBName(), vs.filter, vs.vse.env.SQLParser()) { vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_GTID, Gtid: replication.EncodePosition(vs.pos), @@ -520,7 +520,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent) ([]*binlogdatapb.VEvent, e Type: binlogdatapb.VEventType_OTHER, }) } - if schema.MustReloadSchemaOnDDL(q.SQL, vs.cp.DBName()) { + if schema.MustReloadSchemaOnDDL(q.SQL, vs.cp.DBName(), vs.vse.env.SQLParser()) { vs.se.ReloadAt(context.Background(), vs.pos) } case sqlparser.StmtSavepoint: @@ -682,7 +682,7 @@ func (vs *vstreamer) buildJournalPlan(id uint64, tm *mysql.TableMap) error { // Build a normal table plan, which means, return all rows // and columns as is. Special handling is done when we actually // receive the row event. We'll build a JOURNAL event instead. - plan, err := buildREPlan(table, nil, "") + plan, err := buildREPlan(table, nil, "", vs.se.CollationEnv()) if err != nil { return err } @@ -716,7 +716,7 @@ func (vs *vstreamer) buildVersionPlan(id uint64, tm *mysql.TableMap) error { // Build a normal table plan, which means, return all rows // and columns as is. Special handling is done when we actually // receive the row event. 
We'll build a JOURNAL event instead. - plan, err := buildREPlan(table, nil, "") + plan, err := buildREPlan(table, nil, "", vs.se.CollationEnv()) if err != nil { return err } @@ -738,7 +738,7 @@ func (vs *vstreamer) buildTablePlan(id uint64, tm *mysql.TableMap) (*binlogdatap Name: tm.Name, Fields: cols, } - plan, err := buildPlan(table, vs.vschema, vs.filter) + plan, err := buildPlan(table, vs.vschema, vs.filter, vs.se.CollationEnv(), vs.se.SQLParser()) if err != nil { return nil, err } @@ -764,15 +764,16 @@ func (vs *vstreamer) buildTablePlan(id uint64, tm *mysql.TableMap) (*binlogdatap func (vs *vstreamer) buildTableColumns(tm *mysql.TableMap) ([]*querypb.Field, error) { var fields []*querypb.Field for i, typ := range tm.Types { - t, err := sqltypes.MySQLToType(int64(typ), 0) + t, err := sqltypes.MySQLToType(typ, 0) if err != nil { return nil, fmt.Errorf("unsupported type: %d, position: %d", typ, i) } + coll := collations.CollationForType(t, vs.se.CollationEnv().DefaultConnectionCharset()) fields = append(fields, &querypb.Field{ Name: fmt.Sprintf("@%d", i+1), Type: t, - Charset: uint32(collations.DefaultCollationForType(t)), - Flags: mysql.FlagsForColumn(t, collations.DefaultCollationForType(t)), + Charset: uint32(coll), + Flags: mysql.FlagsForColumn(t, coll), }) } st, err := vs.se.GetTableForPos(sqlparser.NewIdentifierCS(tm.Name), replication.EncodePosition(vs.pos)) @@ -956,7 +957,7 @@ func (vs *vstreamer) rebuildPlans() error { // cause that to change. 
continue } - newPlan, err := buildPlan(plan.Table, vs.vschema, vs.filter) + newPlan, err := buildPlan(plan.Table, vs.vschema, vs.filter, vs.se.CollationEnv(), vs.se.SQLParser()) if err != nil { return err } diff --git a/go/vt/vttest/local_cluster.go b/go/vt/vttest/local_cluster.go index 5ef0ea2c314..8d75dcebe44 100644 --- a/go/vt/vttest/local_cluster.go +++ b/go/vt/vttest/local_cluster.go @@ -35,14 +35,15 @@ import ( "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/proto" - "vitess.io/vitess/go/constants/sidecar" - - "vitess.io/vitess/go/vt/sidecardb" + "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/constants/sidecar" "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/proto/logutil" + "vitess.io/vitess/go/vt/sidecardb" "vitess.io/vitess/go/vt/vtctl/vtctlclient" vschemapb "vitess.io/vitess/go/vt/proto/vschema" @@ -280,7 +281,11 @@ type LocalCluster struct { // cluster access should be performed through the vtgate port. 
func (db *LocalCluster) MySQLConnParams() mysql.ConnParams { connParams := db.mysql.Params(db.DbName()) - connParams.Charset = db.Config.Charset + ch, err := collations.MySQL8().ParseConnectionCharset(db.Config.Charset) + if err != nil { + panic(err) + } + connParams.Charset = ch return connParams } @@ -301,7 +306,11 @@ func (db *LocalCluster) MySQLCleanConnParams() mysql.ConnParams { mysqlctl = toxiproxy.mysqlctl } connParams := mysqlctl.Params(db.DbName()) - connParams.Charset = db.Config.Charset + ch, err := collations.MySQL8().ParseConnectionCharset(db.Config.Charset) + if err != nil { + panic(err) + } + connParams.Charset = ch return connParams } @@ -547,7 +556,7 @@ func (db *LocalCluster) createVTSchema() error { return db.ExecuteFetch(query, "") } - if err := sidecardb.Init(context.Background(), sidecardbExec); err != nil { + if err := sidecardb.Init(context.Background(), sidecardbExec, sqlparser.NewTestParser()); err != nil { return err } return nil diff --git a/go/vt/wrangler/external_cluster_test.go b/go/vt/wrangler/external_cluster_test.go index 3c878411b6b..ebaef4305c4 100644 --- a/go/vt/wrangler/external_cluster_test.go +++ b/go/vt/wrangler/external_cluster_test.go @@ -4,12 +4,13 @@ import ( "context" "testing" - "vitess.io/vitess/go/test/utils" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" ) @@ -18,7 +19,7 @@ func TestVitessCluster(t *testing.T) { defer cancel() ts := memorytopo.NewServer(ctx, "zone1") tmc := newTestWranglerTMClient() - wr := New(logutil.NewConsoleLogger(), ts, tmc) + wr := New(logutil.NewConsoleLogger(), ts, tmc, collations.MySQL8(), sqlparser.NewTestParser()) name, topoType, topoServer, topoRoot := "c1", "x", "y", "z" t.Run("Zero clusters to start", func(t *testing.T) { diff --git 
a/go/vt/wrangler/fake_dbclient_test.go b/go/vt/wrangler/fake_dbclient_test.go index 03fad81d7b8..7fce5ce9afc 100644 --- a/go/vt/wrangler/fake_dbclient_test.go +++ b/go/vt/wrangler/fake_dbclient_test.go @@ -162,7 +162,7 @@ func (dc *fakeDBClient) ExecuteFetch(query string, maxrows int) (*sqltypes.Resul } func (dc *fakeDBClient) ExecuteFetchMulti(query string, maxrows int) ([]*sqltypes.Result, error) { - queries, err := sqlparser.SplitStatementToPieces(query) + queries, err := sqlparser.NewTestParser().SplitStatementToPieces(query) if err != nil { return nil, err } diff --git a/go/vt/wrangler/fake_tablet_test.go b/go/vt/wrangler/fake_tablet_test.go index cae4e8ffc41..6f23d437460 100644 --- a/go/vt/wrangler/fake_tablet_test.go +++ b/go/vt/wrangler/fake_tablet_test.go @@ -23,29 +23,28 @@ import ( "testing" "time" - vdiff2 "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" - "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" - "github.com/stretchr/testify/require" "google.golang.org/grpc" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" + querypb "vitess.io/vitess/go/vt/proto/query" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vttablet/grpctmserver" "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/queryservice/fakes" "vitess.io/vitess/go/vt/vttablet/tabletconntest" "vitess.io/vitess/go/vt/vttablet/tabletmanager" + vdiff2 "vitess.io/vitess/go/vt/vttablet/tabletmanager/vdiff" "vitess.io/vitess/go/vt/vttablet/tabletservermock" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/vttablet/tmclienttest" - querypb "vitess.io/vitess/go/vt/proto/query" - topodatapb "vitess.io/vitess/go/vt/proto/topodata" - // import the gRPC client implementation for tablet manager _ 
"vitess.io/vitess/go/vt/vttablet/grpctmclient" @@ -190,7 +189,6 @@ func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { ft.Tablet.PortMap["vt"] = vtPort ft.Tablet.PortMap["grpc"] = gRPCPort ft.Tablet.Hostname = "127.0.0.1" - config := &tabletenv.TabletConfig{} // Create a test tm on that port, and re-read the record // (it has new ports and IP). ft.TM = &tabletmanager.TabletManager{ @@ -199,7 +197,9 @@ func (ft *fakeTablet) StartActionLoop(t *testing.T, wr *Wrangler) { MysqlDaemon: ft.FakeMysqlDaemon, DBConfigs: &dbconfigs.DBConfigs{}, QueryServiceControl: tabletservermock.NewController(), - VDiffEngine: vdiff2.NewEngine(config, wr.TopoServer(), ft.Tablet), + VDiffEngine: vdiff2.NewEngine(wr.TopoServer(), ft.Tablet, collations.MySQL8(), sqlparser.NewTestParser()), + CollationEnv: collations.MySQL8(), + SQLParser: sqlparser.NewTestParser(), } if err := ft.TM.Start(ft.Tablet, nil); err != nil { t.Fatal(err) diff --git a/go/vt/wrangler/materializer.go b/go/vt/wrangler/materializer.go index 13f430919fd..9d39eec969d 100644 --- a/go/vt/wrangler/materializer.go +++ b/go/vt/wrangler/materializer.go @@ -445,7 +445,7 @@ func (wr *Wrangler) checkIfPreviousJournalExists(ctx context.Context, mz *materi mu sync.Mutex exists bool tablets []string - ws = workflow.NewServer(wr.ts, wr.tmc) + ws = workflow.NewServer(wr.ts, wr.tmc, wr.parser) ) err := forAllSources(func(si *topo.ShardInfo) error { @@ -540,7 +540,7 @@ func (wr *Wrangler) prepareCreateLookup(ctx context.Context, keyspace string, sp return nil, nil, nil, fmt.Errorf("vindex %s is not a lookup type", vindex.Type) } - targetKeyspace, targetTableName, err = sqlparser.ParseTable(vindex.Params["table"]) + targetKeyspace, targetTableName, err = wr.parser.ParseTable(vindex.Params["table"]) if err != nil || targetKeyspace == "" { return nil, nil, nil, fmt.Errorf("vindex table name must be in the form .
. Got: %v", vindex.Params["table"]) } @@ -837,7 +837,7 @@ func (wr *Wrangler) ExternalizeVindex(ctx context.Context, qualifiedVindexName s return fmt.Errorf("vindex %s not found in vschema", qualifiedVindexName) } - targetKeyspace, targetTableName, err := sqlparser.ParseTable(sourceVindex.Params["table"]) + targetKeyspace, targetTableName, err := wr.parser.ParseTable(sourceVindex.Params["table"]) if err != nil || targetKeyspace == "" { return fmt.Errorf("vindex table name must be in the form .
. Got: %v", sourceVindex.Params["table"]) } @@ -1064,7 +1064,7 @@ func (wr *Wrangler) buildMaterializer(ctx context.Context, ms *vtctldatapb.Mater if err != nil { return nil, err } - targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace) + targetVSchema, err := vindexes.BuildKeyspaceSchema(vschema, ms.TargetKeyspace, wr.parser) if err != nil { return nil, err } @@ -1220,7 +1220,7 @@ func (mz *materializer) deploySchema(ctx context.Context) error { if createDDL == createDDLAsCopy || createDDL == createDDLAsCopyDropConstraint || createDDL == createDDLAsCopyDropForeignKeys { if ts.SourceExpression != "" { // Check for table if non-empty SourceExpression. - sourceTableName, err := sqlparser.TableFromStatement(ts.SourceExpression) + sourceTableName, err := mz.wr.parser.TableFromStatement(ts.SourceExpression) if err != nil { return err } @@ -1236,7 +1236,7 @@ func (mz *materializer) deploySchema(ctx context.Context) error { } if createDDL == createDDLAsCopyDropConstraint { - strippedDDL, err := stripTableConstraints(ddl) + strippedDDL, err := stripTableConstraints(ddl, mz.wr.parser) if err != nil { return err } @@ -1245,7 +1245,7 @@ func (mz *materializer) deploySchema(ctx context.Context) error { } if createDDL == createDDLAsCopyDropForeignKeys { - strippedDDL, err := stripTableForeignKeys(ddl) + strippedDDL, err := stripTableForeignKeys(ddl, mz.wr.parser) if err != nil { return err } @@ -1266,7 +1266,7 @@ func (mz *materializer) deploySchema(ctx context.Context) error { // We use schemadiff to normalize the schema. // For now, and because this is could have wider implications, we ignore any errors in // reading the source schema. 
- schema, err := schemadiff.NewSchemaFromQueries(applyDDLs) + schema, err := schemadiff.NewSchemaFromQueries(applyDDLs, mz.wr.parser) if err != nil { log.Error(vterrors.Wrapf(err, "AtomicCopy: failed to normalize schema via schemadiff")) } else { @@ -1291,9 +1291,8 @@ func (mz *materializer) deploySchema(ctx context.Context) error { }) } -func stripTableForeignKeys(ddl string) (string, error) { - - ast, err := sqlparser.ParseStrictDDL(ddl) +func stripTableForeignKeys(ddl string, parser *sqlparser.Parser) (string, error) { + ast, err := parser.ParseStrictDDL(ddl) if err != nil { return "", err } @@ -1321,8 +1320,8 @@ func stripTableForeignKeys(ddl string) (string, error) { return newDDL, nil } -func stripTableConstraints(ddl string) (string, error) { - ast, err := sqlparser.ParseStrictDDL(ddl) +func stripTableConstraints(ddl string, parser *sqlparser.Parser) (string, error) { + ast, err := parser.ParseStrictDDL(ddl) if err != nil { return "", err } @@ -1368,7 +1367,7 @@ func (mz *materializer) generateInserts(ctx context.Context, sourceShards []*top } // Validate non-empty query. 
- stmt, err := sqlparser.Parse(ts.SourceExpression) + stmt, err := mz.wr.parser.Parse(ts.SourceExpression) if err != nil { return "", err } diff --git a/go/vt/wrangler/materializer_env_test.go b/go/vt/wrangler/materializer_env_test.go index b98621ffa1b..6c209ad21f6 100644 --- a/go/vt/wrangler/materializer_env_test.go +++ b/go/vt/wrangler/materializer_env_test.go @@ -30,6 +30,7 @@ import ( "go.uber.org/goleak" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" @@ -129,7 +130,8 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M cell: "cell", tmc: newTestMaterializerTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) + parser := sqlparser.NewTestParser() + env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8(), parser) tabletID := 100 for _, shard := range sources { _ = env.addTablet(tabletID, env.ms.SourceKeyspace, shard, topodatapb.TabletType_PRIMARY) @@ -145,7 +147,7 @@ func newTestMaterializerEnv(t *testing.T, ctx context.Context, ms *vtctldatapb.M for _, ts := range ms.TableSettings { tableName := ts.TargetTable - table, err := sqlparser.TableFromStatement(ts.SourceExpression) + table, err := parser.TableFromStatement(ts.SourceExpression) if err == nil { tableName = table.Name.String() } diff --git a/go/vt/wrangler/materializer_test.go b/go/vt/wrangler/materializer_test.go index 242bca31e49..5dd5929adb9 100644 --- a/go/vt/wrangler/materializer_test.go +++ b/go/vt/wrangler/materializer_test.go @@ -30,9 +30,11 @@ import ( "google.golang.org/protobuf/encoding/prototext" "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/utils" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtgate/vindexes" 
@@ -1541,7 +1543,7 @@ func TestCreateLookupVindexFailures(t *testing.T) { defer cancel() topoServ := memorytopo.NewServer(ctx, "cell") - wr := New(logutil.NewConsoleLogger(), topoServ, nil) + wr := New(logutil.NewConsoleLogger(), topoServ, nil, collations.MySQL8(), sqlparser.NewTestParser()) unique := map[string]*vschemapb.Vindex{ "v": { @@ -2541,7 +2543,7 @@ func TestMaterializerNoSourcePrimary(t *testing.T) { cell: "cell", tmc: newTestMaterializerTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) + env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) defer env.close() tabletID := 100 @@ -2870,7 +2872,7 @@ func TestStripForeignKeys(t *testing.T) { } for _, tc := range tcs { - newDDL, err := stripTableForeignKeys(tc.ddl) + newDDL, err := stripTableForeignKeys(tc.ddl, sqlparser.NewTestParser()) if tc.hasErr != (err != nil) { t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) } @@ -2944,7 +2946,7 @@ func TestStripConstraints(t *testing.T) { } for _, tc := range tcs { - newDDL, err := stripTableConstraints(tc.ddl) + newDDL, err := stripTableConstraints(tc.ddl, sqlparser.NewTestParser()) if tc.hasErr != (err != nil) { t.Fatalf("hasErr does not match: err: %v, tc: %+v", err, tc) } diff --git a/go/vt/wrangler/reparent.go b/go/vt/wrangler/reparent.go index dbad6b2ee29..d23f3f016f8 100644 --- a/go/vt/wrangler/reparent.go +++ b/go/vt/wrangler/reparent.go @@ -60,7 +60,7 @@ func (wr *Wrangler) InitShardPrimary(ctx context.Context, keyspace, shard string ev := &events.Reparent{} // do the work - err = grpcvtctldserver.NewVtctldServer(wr.ts).InitShardPrimaryLocked(ctx, ev, &vtctldatapb.InitShardPrimaryRequest{ + err = grpcvtctldserver.NewVtctldServer(wr.ts, wr.parser).InitShardPrimaryLocked(ctx, ev, &vtctldatapb.InitShardPrimaryRequest{ Keyspace: keyspace, Shard: shard, PrimaryElectTabletAlias: primaryElectTabletAlias, diff --git a/go/vt/wrangler/resharder_env_test.go 
b/go/vt/wrangler/resharder_env_test.go index ee39c7e5eaa..911a0be6d9c 100644 --- a/go/vt/wrangler/resharder_env_test.go +++ b/go/vt/wrangler/resharder_env_test.go @@ -26,9 +26,11 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/key" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ -93,7 +95,7 @@ func newTestResharderEnv(t *testing.T, ctx context.Context, sources, targets []s cell: "cell", tmc: newTestResharderTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) + env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) initTopo(t, env.topoServ, "ks", sources, targets, []string{"cell"}) tabletID := 100 for _, shard := range sources { diff --git a/go/vt/wrangler/tablet_test.go b/go/vt/wrangler/tablet_test.go index 1350b6b574c..6df20535d71 100644 --- a/go/vt/wrangler/tablet_test.go +++ b/go/vt/wrangler/tablet_test.go @@ -21,8 +21,10 @@ import ( "strings" "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" ) @@ -36,7 +38,7 @@ func TestInitTabletShardConversion(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil) + wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8(), sqlparser.NewTestParser()) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -70,7 +72,7 @@ func TestDeleteTabletBasic(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil) + wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8(), 
sqlparser.NewTestParser()) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -102,7 +104,7 @@ func TestDeleteTabletTruePrimary(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil) + wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8(), sqlparser.NewTestParser()) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -149,7 +151,7 @@ func TestDeleteTabletFalsePrimary(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil) + wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8(), sqlparser.NewTestParser()) tablet1 := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ @@ -201,7 +203,7 @@ func TestDeleteTabletShardNonExisting(t *testing.T) { cell := "cell1" ts := memorytopo.NewServer(ctx, cell) - wr := New(logutil.NewConsoleLogger(), ts, nil) + wr := New(logutil.NewConsoleLogger(), ts, nil, collations.MySQL8(), sqlparser.NewTestParser()) tablet := &topodatapb.Tablet{ Alias: &topodatapb.TabletAlias{ diff --git a/go/vt/wrangler/testlib/backup_test.go b/go/vt/wrangler/testlib/backup_test.go index 0ba8adc9a06..b188b5343d5 100644 --- a/go/vt/wrangler/testlib/backup_test.go +++ b/go/vt/wrangler/testlib/backup_test.go @@ -27,17 +27,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql/replication" - - "vitess.io/vitess/go/vt/discovery" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/mysqlctl/filebackupstorage" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" 
"vitess.io/vitess/go/vt/topo/topoproto" @@ -92,7 +92,7 @@ func testBackupRestore(t *testing.T, cDetails *compressionDetails) error { db := fakesqldb.New(t) defer db.Close() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -344,7 +344,7 @@ func TestBackupRestoreLagged(t *testing.T) { db := fakesqldb.New(t) defer db.Close() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -563,7 +563,7 @@ func TestRestoreUnreachablePrimary(t *testing.T) { db := fakesqldb.New(t) defer db.Close() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -738,7 +738,7 @@ func TestDisableActiveReparents(t *testing.T) { db := fakesqldb.New(t) defer db.Close() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/copy_schema_shard_test.go b/go/vt/wrangler/testlib/copy_schema_shard_test.go index 866ec2fe931..f45bb8dba1e 100644 --- a/go/vt/wrangler/testlib/copy_schema_shard_test.go +++ 
b/go/vt/wrangler/testlib/copy_schema_shard_test.go @@ -22,12 +22,13 @@ import ( "testing" "time" - "vitess.io/vitess/go/vt/discovery" - + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/mysqlctl/tmutils" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vttablet/tabletmanager/vreplication" @@ -56,7 +57,7 @@ func copySchema(t *testing.T, useShardAsSource bool) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go index 99cc1839186..b4c9dc4c8a7 100644 --- a/go/vt/wrangler/testlib/emergency_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/emergency_reparent_shard_test.go @@ -25,12 +25,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql/replication" - "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sets" "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" @@ -50,7 +51,7 @@ func TestEmergencyReparentShard(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", 
"cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -204,7 +205,7 @@ func TestEmergencyReparentShardPrimaryElectNotBest(t *testing.T) { discovery.SetTabletPickerRetryDelay(5 * time.Millisecond) ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create a primary, a couple good replicas oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) diff --git a/go/vt/wrangler/testlib/external_reparent_test.go b/go/vt/wrangler/testlib/external_reparent_test.go index c0152de3cf3..f5f1b157406 100644 --- a/go/vt/wrangler/testlib/external_reparent_test.go +++ b/go/vt/wrangler/testlib/external_reparent_test.go @@ -22,11 +22,12 @@ import ( "testing" "time" - "vitess.io/vitess/go/vt/discovery" - "github.com/stretchr/testify/assert" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/vt/discovery" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/topotools" @@ -50,7 +51,7 @@ func TestTabletExternallyReparentedBasic(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -143,7 +144,7 @@ func 
TestTabletExternallyReparentedToReplica(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create an old primary, a new primary, two good replicas, one bad replica oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -226,7 +227,7 @@ func TestTabletExternallyReparentedWithDifferentMysqlPort(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create an old primary, a new primary, two good replicas, one bad replica oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -319,7 +320,7 @@ func TestTabletExternallyReparentedContinueOnUnexpectedPrimary(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create an old primary, a new primary, two good replicas, one bad replica oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -405,7 +406,7 @@ func TestTabletExternallyReparentedRerun(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := 
wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create an old primary, a new primary, and a good replica. oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) @@ -509,7 +510,7 @@ func TestRPCTabletExternallyReparentedDemotesPrimaryToConfiguredTabletType(t *te ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create an old primary and a new primary oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_SPARE, nil) diff --git a/go/vt/wrangler/testlib/fake_tablet.go b/go/vt/wrangler/testlib/fake_tablet.go index 9c511185769..af5ebad06b1 100644 --- a/go/vt/wrangler/testlib/fake_tablet.go +++ b/go/vt/wrangler/testlib/fake_tablet.go @@ -29,10 +29,12 @@ import ( "google.golang.org/grpc" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vttablet/grpctmserver" @@ -209,6 +211,8 @@ func (ft *FakeTablet) StartActionLoop(t *testing.T, wr *wrangler.Wrangler) { DBConfigs: &dbconfigs.DBConfigs{}, QueryServiceControl: tabletservermock.NewController(), VREngine: vreplication.NewTestEngine(wr.TopoServer(), ft.Tablet.Alias.Cell, ft.FakeMysqlDaemon, binlogplayer.NewFakeDBClient, binlogplayer.NewFakeDBClient, topoproto.TabletDbName(ft.Tablet), nil), + CollationEnv: collations.MySQL8(), + SQLParser: 
sqlparser.NewTestParser(), } if err := ft.TM.Start(ft.Tablet, nil); err != nil { t.Fatalf("Error in tablet - %v, err - %v", topoproto.TabletAliasString(ft.Tablet.Alias), err.Error()) diff --git a/go/vt/wrangler/testlib/find_tablet_test.go b/go/vt/wrangler/testlib/find_tablet_test.go index 5b6f26f7056..d6c142d9030 100644 --- a/go/vt/wrangler/testlib/find_tablet_test.go +++ b/go/vt/wrangler/testlib/find_tablet_test.go @@ -21,7 +21,9 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" @@ -36,7 +38,7 @@ func TestFindTablet(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // Create an old primary, two good replicas oldPrimary := NewFakeTablet(t, wr, "cell1", 0, topodatapb.TabletType_PRIMARY, nil) diff --git a/go/vt/wrangler/testlib/permissions_test.go b/go/vt/wrangler/testlib/permissions_test.go index 4a0e71512f3..37913da6fd2 100644 --- a/go/vt/wrangler/testlib/permissions_test.go +++ b/go/vt/wrangler/testlib/permissions_test.go @@ -22,7 +22,9 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/sqltypes" @@ -47,7 +49,7 @@ func TestPermissions(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := 
wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/planned_reparent_shard_test.go b/go/vt/wrangler/testlib/planned_reparent_shard_test.go index 0125e69cac0..39bde50b3e9 100644 --- a/go/vt/wrangler/testlib/planned_reparent_shard_test.go +++ b/go/vt/wrangler/testlib/planned_reparent_shard_test.go @@ -22,8 +22,10 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/vt/mysqlctl" + "vitess.io/vitess/go/vt/sqlparser" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -51,7 +53,7 @@ func TestPlannedReparentShardNoPrimaryProvided(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -167,7 +169,7 @@ func TestPlannedReparentShardNoError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -303,7 +305,7 @@ func TestPlannedReparentInitialization(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, 
tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -389,7 +391,7 @@ func TestPlannedReparentShardWaitForPositionFail(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -497,7 +499,7 @@ func TestPlannedReparentShardWaitForPositionTimeout(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -603,7 +605,7 @@ func TestPlannedReparentShardRelayLogError(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -683,7 +685,7 @@ func TestPlannedReparentShardRelayLogErrorStartReplication(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer 
vp.Close() @@ -768,7 +770,7 @@ func TestPlannedReparentShardPromoteReplicaFail(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() @@ -908,7 +910,7 @@ func TestPlannedReparentShardSamePrimary(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/reparent_utils_test.go b/go/vt/wrangler/testlib/reparent_utils_test.go index 0d1d84e89f5..55a7e3b225b 100644 --- a/go/vt/wrangler/testlib/reparent_utils_test.go +++ b/go/vt/wrangler/testlib/reparent_utils_test.go @@ -24,17 +24,16 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/replication" - - "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" - "vitess.io/vitess/go/vt/discovery" - "vitess.io/vitess/go/vt/vtctl/reparentutil" - "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" + "vitess.io/vitess/go/vt/vtctl/reparentutil" + "vitess.io/vitess/go/vt/vtctl/reparentutil/reparenttestutil" "vitess.io/vitess/go/vt/vttablet/tmclient" "vitess.io/vitess/go/vt/wrangler" @@ -51,7 +50,7 @@ func TestShardReplicationStatuses(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer 
cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // create shard and tablets if _, err := ts.GetOrCreateShard(ctx, "test_keyspace", "0"); err != nil { @@ -135,7 +134,7 @@ func TestReparentTablet(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // create shard and tablets if _, err := ts.GetOrCreateShard(ctx, "test_keyspace", "0"); err != nil { @@ -192,7 +191,7 @@ func TestSetReplicationSource(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) // create shard and tablets _, err := ts.GetOrCreateShard(ctx, "test_keyspace", "0") diff --git a/go/vt/wrangler/testlib/shard_test.go b/go/vt/wrangler/testlib/shard_test.go index a0b1b0a3562..244c7a1fa44 100644 --- a/go/vt/wrangler/testlib/shard_test.go +++ b/go/vt/wrangler/testlib/shard_test.go @@ -21,7 +21,9 @@ import ( "strings" "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topotools" @@ -35,7 +37,7 @@ func TestDeleteShardCleanup(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() 
ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/version_test.go b/go/vt/wrangler/testlib/version_test.go index 102bcdfe6e5..3ba291c9f33 100644 --- a/go/vt/wrangler/testlib/version_test.go +++ b/go/vt/wrangler/testlib/version_test.go @@ -25,7 +25,9 @@ import ( "testing" "time" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/discovery" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/logutil" @@ -70,7 +72,7 @@ func TestVersion(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() ts := memorytopo.NewServer(ctx, "cell1", "cell2") - wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient()) + wr := wrangler.New(logutil.NewConsoleLogger(), ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) vp := NewVtctlPipe(t, ts) defer vp.Close() diff --git a/go/vt/wrangler/testlib/vtctl_pipe.go b/go/vt/wrangler/testlib/vtctl_pipe.go index 448f248821a..8eef9ed5fe8 100644 --- a/go/vt/wrangler/testlib/vtctl_pipe.go +++ b/go/vt/wrangler/testlib/vtctl_pipe.go @@ -30,8 +30,10 @@ import ( "github.com/stretchr/testify/require" "google.golang.org/grpc" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/vt/logutil" "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl/grpcvtctlserver" "vitess.io/vitess/go/vt/vtctl/vtctlclient" @@ -76,7 +78,7 @@ func NewVtctlPipe(t *testing.T, ts *topo.Server) *VtctlPipe { // Create a gRPC server and listen on the port server := grpc.NewServer() - grpcvtctlserver.StartServer(server, ts) + 
grpcvtctlserver.StartServer(server, ts, collations.MySQL8(), sqlparser.NewTestParser()) go server.Serve(listener) // Create a VtctlClient gRPC client to talk to the fake server diff --git a/go/vt/wrangler/traffic_switcher.go b/go/vt/wrangler/traffic_switcher.go index 498e83a2a3a..a204eff9453 100644 --- a/go/vt/wrangler/traffic_switcher.go +++ b/go/vt/wrangler/traffic_switcher.go @@ -28,6 +28,8 @@ import ( "golang.org/x/sync/errgroup" + "vitess.io/vitess/go/mysql/collations" + "vitess.io/vitess/go/json2" "vitess.io/vitess/go/maps2" "vitess.io/vitess/go/sqlescape" @@ -136,6 +138,7 @@ type trafficSwitcher struct { func (ts *trafficSwitcher) TopoServer() *topo.Server { return ts.wr.ts } func (ts *trafficSwitcher) TabletManagerClient() tmclient.TabletManagerClient { return ts.wr.tmc } +func (ts *trafficSwitcher) CollationEnv() *collations.Environment { return ts.wr.collationEnv } func (ts *trafficSwitcher) Logger() logutil.Logger { return ts.wr.logger } func (ts *trafficSwitcher) VReplicationExec(ctx context.Context, alias *topodatapb.TabletAlias, query string) (*querypb.QueryResult, error) { return ts.wr.VReplicationExec(ctx, alias, query) @@ -221,7 +224,7 @@ func (wr *Wrangler) getWorkflowState(ctx context.Context, targetKeyspace, workfl return nil, nil, err } - ws := workflow.NewServer(wr.ts, wr.tmc) + ws := workflow.NewServer(wr.ts, wr.tmc, wr.parser) state := &workflow.State{ Workflow: workflowName, SourceKeyspace: ts.SourceKeyspaceName(), @@ -484,11 +487,11 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa ts, ws, err := wr.getWorkflowState(ctx, targetKeyspace, workflowName) _ = ws if err != nil { - handleError("failed to get the current workflow state", err) + return handleError("failed to get the current workflow state", err) } if ts == nil { errorMsg := fmt.Sprintf("workflow %s not found in keyspace %s", workflowName, targetKeyspace) - handleError("failed to get the current workflow state", fmt.Errorf(errorMsg)) + return 
handleError("failed to get the current workflow state", fmt.Errorf(errorMsg)) } var sw iswitcher @@ -505,7 +508,7 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa ts.Logger().Infof("Built switching metadata: %+v", ts) if err := ts.validate(ctx); err != nil { - handleError("workflow validation failed", err) + return handleError("workflow validation failed", err) } if reverseReplication { @@ -553,7 +556,7 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa } if !journalsExist { ts.Logger().Infof("No previous journals were found. Proceeding normally.") - sm, err := workflow.BuildStreamMigrator(ctx, ts, cancel) + sm, err := workflow.BuildStreamMigrator(ctx, ts, cancel, wr.parser) if err != nil { return handleError("failed to migrate the workflow streams", err) } @@ -652,7 +655,7 @@ func (wr *Wrangler) SwitchWrites(ctx context.Context, targetKeyspace, workflowNa return handleError("failed to update the routing rules", err) } if err := sw.streamMigraterfinalize(ctx, ts, sourceWorkflows); err != nil { - handleError("failed to finalize the traffic switch", err) + return handleError("failed to finalize the traffic switch", err) } if reverseReplication { if err := sw.startReverseVReplication(ctx); err != nil { @@ -953,7 +956,7 @@ func (wr *Wrangler) buildTrafficSwitcher(ctx context.Context, targetKeyspace, wo if err != nil { return nil, err } - ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace) + ts.sourceKSSchema, err = vindexes.BuildKeyspaceSchema(vs, ts.sourceKeyspace, wr.parser) if err != nil { return nil, err } @@ -1147,7 +1150,7 @@ func (ts *trafficSwitcher) switchShardReads(ctx context.Context, cells []string, // If so, it also returns the list of sourceWorkflows that need to be switched. 
func (ts *trafficSwitcher) checkJournals(ctx context.Context) (journalsExist bool, sourceWorkflows []string, err error) { var ( - ws = workflow.NewServer(ts.TopoServer(), ts.TabletManagerClient()) + ws = workflow.NewServer(ts.TopoServer(), ts.TabletManagerClient(), ts.wr.parser) mu sync.Mutex ) @@ -1612,7 +1615,8 @@ func (ts *trafficSwitcher) deleteShardRoutingRules(ctx context.Context) error { func (ts *trafficSwitcher) startReverseVReplication(ctx context.Context) error { return ts.ForAllSources(func(source *workflow.MigrationSource) error { - query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s", encodeString(source.GetPrimary().DbName())) + query := fmt.Sprintf("update _vt.vreplication set state='Running', message='' where db_name=%s and workflow=%s", + encodeString(source.GetPrimary().DbName()), encodeString(ts.ReverseWorkflowName())) _, err := ts.VReplicationExec(ctx, source.GetPrimary().Alias, query) return err }) diff --git a/go/vt/wrangler/traffic_switcher_env_test.go b/go/vt/wrangler/traffic_switcher_env_test.go index 572b2b4a9e6..7ec1e1f2048 100644 --- a/go/vt/wrangler/traffic_switcher_env_test.go +++ b/go/vt/wrangler/traffic_switcher_env_test.go @@ -27,6 +27,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/sync/semaphore" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" "vitess.io/vitess/go/mysql/replication" "vitess.io/vitess/go/sqlescape" @@ -118,7 +119,7 @@ func newTestTableMigrater(ctx context.Context, t *testing.T) *testMigraterEnv { func newTestTableMigraterCustom(ctx context.Context, t *testing.T, sourceShards, targetShards []string, fmtQuery string) *testMigraterEnv { tme := &testMigraterEnv{} tme.ts = memorytopo.NewServer(ctx, "cell1", "cell2") - tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), 
sqlparser.NewTestParser()) tme.wr.sem = semaphore.NewWeighted(1) tme.sourceShards = sourceShards tme.targetShards = targetShards @@ -382,7 +383,7 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar require.Greater(t, len(shards), 1, "shard by shard migrations can only be done on sharded keyspaces") tme := &testMigraterEnv{} tme.ts = memorytopo.NewServer(ctx, "cell1", "cell2") - tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) tme.wr.sem = semaphore.NewWeighted(1) tme.sourceShards = shards tme.targetShards = shards @@ -538,7 +539,7 @@ func newTestTablePartialMigrater(ctx context.Context, t *testing.T, shards, shar func newTestShardMigrater(ctx context.Context, t *testing.T, sourceShards, targetShards []string) *testShardMigraterEnv { tme := &testShardMigraterEnv{} tme.ts = memorytopo.NewServer(ctx, "cell1", "cell2") - tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient()) + tme.wr = New(logutil.NewConsoleLogger(), tme.ts, tmclient.NewTabletManagerClient(), collations.MySQL8(), sqlparser.NewTestParser()) tme.sourceShards = sourceShards tme.targetShards = targetShards tme.tmeDB = fakesqldb.New(t) @@ -862,7 +863,7 @@ func (tme *testShardMigraterEnv) expectStartReverseVReplication() { // NOTE: this is not a faithful reproduction of what should happen. // The ids returned are not accurate. 
for _, dbclient := range tme.dbSourceClients { - dbclient.addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + dbclient.addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) dbclient.addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) dbclient.addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) dbclient.addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) diff --git a/go/vt/wrangler/traffic_switcher_test.go b/go/vt/wrangler/traffic_switcher_test.go index f7aebed185a..33efe7032b6 100644 --- a/go/vt/wrangler/traffic_switcher_test.go +++ b/go/vt/wrangler/traffic_switcher_test.go @@ -434,11 +434,11 @@ func TestTableMigrateMainflow(t *testing.T) { createJournals() startReverseVReplication := func() { - tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1'", resultid34, nil) + tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[0].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) - tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1'", resultid34, nil) + tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[1].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), 
nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) @@ -731,11 +731,11 @@ func TestShardMigrateMainflow(t *testing.T) { createJournals() startReverseVReplication := func() { - tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[0].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) - tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[1].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) @@ -1233,11 +1233,11 @@ func TestTableMigrateJournalExists(t *testing.T) { tme.dbSourceClients[1].addQueryRE(journal2, &sqltypes.Result{}, nil) // mi.startReverseVReplication - tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1'", resultid34, nil) + tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[0].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) 
tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) - tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1'", resultid34, nil) + tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks1' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[1].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) @@ -1312,11 +1312,11 @@ func TestShardMigrateJournalExists(t *testing.T) { tme.dbSourceClients[1].addQueryRE(journal2, &sqltypes.Result{}, nil) // mi.startReverseVReplication - tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[0].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) - tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[1].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[1].addQuery("select * 
from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) @@ -2043,11 +2043,11 @@ func TestShardMigrateNoAvailableTabletsForReverseReplication(t *testing.T) { createJournals() startReverseVReplication := func() { - tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + tme.dbSourceClients[0].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[0].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[0].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) - tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks'", resultid34, nil) + tme.dbSourceClients[1].addQuery("select id from _vt.vreplication where db_name = 'vt_ks' and workflow = 'test_reverse'", resultid34, nil) tme.dbSourceClients[1].addQuery("update _vt.vreplication set state = 'Running', message = '' where id in (3, 4)", &sqltypes.Result{}, nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 3", runningResult(3), nil) tme.dbSourceClients[1].addQuery("select * from _vt.vreplication where id = 4", runningResult(4), nil) diff --git a/go/vt/wrangler/vdiff.go b/go/vt/wrangler/vdiff.go index 5de1a4b1d3f..35b1e72a459 100644 --- a/go/vt/wrangler/vdiff.go +++ b/go/vt/wrangler/vdiff.go @@ -110,6 +110,9 @@ type vdiff struct { tables []string sourceTimeZone string targetTimeZone string + + collationEnv *collations.Environment + parser *sqlparser.Parser } // compareColInfo contains the metadata for a column of the table being diffed @@ -142,6 +145,9 @@ type tableDiffer struct { // source Primitive and targetPrimitive 
are used for streaming sourcePrimitive engine.Primitive targetPrimitive engine.Primitive + + collationEnv *collations.Environment + parser *sqlparser.Parser } // shardStreamer streams rows from one shard. This works for @@ -218,6 +224,8 @@ func (wr *Wrangler) VDiff(ctx context.Context, targetKeyspace, workflowName, sou tables: includeTables, sourceTimeZone: ts.sourceTimeZone, targetTimeZone: ts.targetTimeZone, + collationEnv: wr.collationEnv, + parser: wr.parser, } for shard, source := range ts.Sources() { df.sources[shard] = &shardStreamer{ @@ -485,8 +493,8 @@ func (df *vdiff) buildVDiffPlan(ctx context.Context, filter *binlogdatapb.Filter // findPKs identifies PKs, determines any collations to be used for // them, and removes them from the columns used for data comparison. -func findPKs(table *tabletmanagerdatapb.TableDefinition, targetSelect *sqlparser.Select, td *tableDiffer) (sqlparser.OrderBy, error) { - columnCollations, err := getColumnCollations(table) +func findPKs(table *tabletmanagerdatapb.TableDefinition, targetSelect *sqlparser.Select, td *tableDiffer, collationEnv *collations.Environment, parser *sqlparser.Parser) (sqlparser.OrderBy, error) { + columnCollations, err := getColumnCollations(table, collationEnv, parser) if err != nil { return nil, err } @@ -530,9 +538,8 @@ func findPKs(table *tabletmanagerdatapb.TableDefinition, targetSelect *sqlparser // getColumnCollations determines the proper collation to use for each // column in the table definition leveraging MySQL's collation inheritance // rules. 
-func getColumnCollations(table *tabletmanagerdatapb.TableDefinition) (map[string]collations.ID, error) { - collationEnv := collations.Local() - createstmt, err := sqlparser.Parse(table.Schema) +func getColumnCollations(table *tabletmanagerdatapb.TableDefinition, collationEnv *collations.Environment, parser *sqlparser.Parser) (map[string]collations.ID, error) { + createstmt, err := parser.Parse(table.Schema) if err != nil { return nil, err } @@ -569,7 +576,7 @@ func getColumnCollations(table *tabletmanagerdatapb.TableDefinition) (map[string } // The table is using the global default charset and collation and // we inherit that. - return collations.Default() + return collationEnv.DefaultConnectionCharset() } columnCollations := make(map[string]collations.ID) @@ -646,7 +653,7 @@ func getColumnNameForSelectExpr(selectExpression sqlparser.SelectExpr) (string, // buildTablePlan builds one tableDiffer. func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, query string) (*tableDiffer, error) { - statement, err := sqlparser.Parse(query) + statement, err := df.parser.Parse(query) if err != nil { return nil, err } @@ -655,7 +662,9 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer return nil, fmt.Errorf("unexpected: %v", sqlparser.String(statement)) } td := &tableDiffer{ - targetTable: table.Name, + targetTable: table.Name, + collationEnv: df.collationEnv, + parser: df.parser, } sourceSelect := &sqlparser.Select{} targetSelect := &sqlparser.Select{} @@ -696,7 +705,7 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer aggregates = append(aggregates, engine.NewAggregateParam( /*opcode*/ opcode.AggregateSum, /*offset*/ len(sourceSelect.SelectExprs)-1, - /*alias*/ "")) + /*alias*/ "", df.collationEnv)) } } default: @@ -735,7 +744,7 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer }, } - orderby, err := findPKs(table, targetSelect, td) + orderby, err := 
findPKs(table, targetSelect, td, df.collationEnv, df.parser) if err != nil { return nil, err } @@ -751,31 +760,32 @@ func (df *vdiff) buildTablePlan(table *tabletmanagerdatapb.TableDefinition, quer td.sourceExpression = sqlparser.String(sourceSelect) td.targetExpression = sqlparser.String(targetSelect) - td.sourcePrimitive = newMergeSorter(df.sources, td.comparePKs) - td.targetPrimitive = newMergeSorter(df.targets, td.comparePKs) + td.sourcePrimitive = newMergeSorter(df.sources, td.comparePKs, df.collationEnv) + td.targetPrimitive = newMergeSorter(df.targets, td.comparePKs, df.collationEnv) // If there were aggregate expressions, we have to re-aggregate // the results, which engine.OrderedAggregate can do. if len(aggregates) != 0 { td.sourcePrimitive = &engine.OrderedAggregate{ - Aggregates: aggregates, - GroupByKeys: pkColsToGroupByParams(td.pkCols), - Input: td.sourcePrimitive, + Aggregates: aggregates, + GroupByKeys: pkColsToGroupByParams(td.pkCols, td.collationEnv), + Input: td.sourcePrimitive, + CollationEnv: df.collationEnv, } } return td, nil } -func pkColsToGroupByParams(pkCols []int) []*engine.GroupByParams { +func pkColsToGroupByParams(pkCols []int, collationEnv *collations.Environment) []*engine.GroupByParams { var res []*engine.GroupByParams for _, col := range pkCols { - res = append(res, &engine.GroupByParams{KeyCol: col, WeightStringCol: -1, Type: evalengine.Type{}}) + res = append(res, &engine.GroupByParams{KeyCol: col, WeightStringCol: -1, Type: evalengine.Type{}, CollationEnv: collationEnv}) } return res } // newMergeSorter creates an engine.MergeSort based on the shard streamers and pk columns. 
-func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compareColInfo) *engine.MergeSort { +func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compareColInfo, collationEnv *collations.Environment) *engine.MergeSort { prims := make([]engine.StreamExecutor, 0, len(participants)) for _, participant := range participants { prims = append(prims, participant) @@ -788,7 +798,7 @@ func newMergeSorter(participants map[string]*shardStreamer, comparePKs []compare if cpk.collation != collations.Unknown { collation = cpk.collation } - ob = append(ob, evalengine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: evalengine.NewType(sqltypes.Unknown, collation)}) + ob = append(ob, evalengine.OrderByParams{Col: cpk.colIndex, WeightStringCol: weightStringCol, Type: evalengine.NewType(sqltypes.Unknown, collation), CollationEnv: collationEnv}) } return &engine.MergeSort{ Primitives: prims, @@ -1309,7 +1319,7 @@ func (td *tableDiffer) compare(sourceRow, targetRow []sqltypes.Value, cols []com if col.collation == collations.Unknown { collationID = collations.CollationBinaryID } - c, err = evalengine.NullsafeCompare(sourceRow[compareIndex], targetRow[compareIndex], collationID) + c, err = evalengine.NullsafeCompare(sourceRow[compareIndex], targetRow[compareIndex], td.collationEnv, collationID) if err != nil { return 0, err } @@ -1323,7 +1333,7 @@ func (td *tableDiffer) compare(sourceRow, targetRow []sqltypes.Value, cols []com func (td *tableDiffer) genRowDiff(queryStmt string, row []sqltypes.Value, debug, onlyPks bool) (*RowDiff, error) { drp := &RowDiff{} drp.Row = make(map[string]sqltypes.Value) - statement, err := sqlparser.Parse(queryStmt) + statement, err := td.parser.Parse(queryStmt) if err != nil { return nil, err } diff --git a/go/vt/wrangler/vdiff_env_test.go b/go/vt/wrangler/vdiff_env_test.go index 01f3a3a0f9e..cf76318339a 100644 --- a/go/vt/wrangler/vdiff_env_test.go +++ b/go/vt/wrangler/vdiff_env_test.go @@ -23,11 
+23,12 @@ import ( "sync" "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/mysql/fakesqldb" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vttablet/queryservice" @@ -78,7 +79,7 @@ func newTestVDiffEnv(t testing.TB, ctx context.Context, sourceShards, targetShar tabletType: topodatapb.TabletType_REPLICA, tmc: newTestVDiffTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) + env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) // Generate a unique dialer name. dialerName := fmt.Sprintf("VDiffTest-%s-%d", t.Name(), rand.Intn(1000000000)) diff --git a/go/vt/wrangler/vdiff_test.go b/go/vt/wrangler/vdiff_test.go index 72e9e215c17..cae4f6afca1 100644 --- a/go/vt/wrangler/vdiff_test.go +++ b/go/vt/wrangler/vdiff_test.go @@ -37,6 +37,7 @@ import ( ) func TestVDiffPlanSuccess(t *testing.T) { + collationEnv := collations.MySQL8() schm := &tabletmanagerdatapb.SchemaDefinition{ TableDefinitions: []*tabletmanagerdatapb.TableDefinition{{ Name: "t1", @@ -95,8 +96,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collationEnv, }, }, { input: &binlogdatapb.Rule{ @@ -112,8 +114,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, 
selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collationEnv, }, }, { input: &binlogdatapb.Rule{ @@ -129,8 +132,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collationEnv, }, }, { input: &binlogdatapb.Rule{ @@ -146,8 +150,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{1, collations.Unknown, true}}, pkCols: []int{1}, selectPks: []int{1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}, collationEnv), + collationEnv: collationEnv, }, }, { input: &binlogdatapb.Rule{ @@ -163,8 +168,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, 
[]compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collationEnv, }, }, { // non-pk text column. @@ -181,8 +187,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collationEnv, }, }, { // non-pk text column, different order. @@ -199,8 +206,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{1, collations.Unknown, true}}, pkCols: []int{1}, selectPks: []int{1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Unknown, true}}, collationEnv), + collationEnv: collationEnv, }, }, { // pk text column. 
@@ -213,12 +221,13 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "pktext", sourceExpression: "select textcol, c2 from pktext order by textcol asc", targetExpression: "select textcol, c2 from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Default(), true}, {1, collations.Unknown, false}}, - comparePKs: []compareColInfo{{0, collations.Default(), true}}, + compareCols: []compareColInfo{{0, collationEnv.DefaultConnectionCharset(), true}, {1, collations.Unknown, false}}, + comparePKs: []compareColInfo{{0, collationEnv.DefaultConnectionCharset(), true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Default(), false}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Default(), false}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collationEnv.DefaultConnectionCharset(), false}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collationEnv.DefaultConnectionCharset(), false}}, collationEnv), + collationEnv: collationEnv, }, }, { // pk text column, different order. 
@@ -231,12 +240,13 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "pktext", sourceExpression: "select c2, textcol from pktext order by textcol asc", targetExpression: "select c2, textcol from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Default(), true}}, - comparePKs: []compareColInfo{{1, collations.Default(), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collationEnv.DefaultConnectionCharset(), true}}, + comparePKs: []compareColInfo{{1, collationEnv.DefaultConnectionCharset(), true}}, pkCols: []int{1}, selectPks: []int{1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default(), false}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default(), false}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collationEnv.DefaultConnectionCharset(), false}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collationEnv.DefaultConnectionCharset(), false}}, collationEnv), + collationEnv: collationEnv, }, }, { // text column as expression. 
@@ -249,12 +259,13 @@ func TestVDiffPlanSuccess(t *testing.T) { targetTable: "pktext", sourceExpression: "select c2, a + b as textcol from pktext order by textcol asc", targetExpression: "select c2, textcol from pktext order by textcol asc", - compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collations.Default(), true}}, - comparePKs: []compareColInfo{{1, collations.Default(), true}}, + compareCols: []compareColInfo{{0, collations.Unknown, false}, {1, collationEnv.DefaultConnectionCharset(), true}}, + comparePKs: []compareColInfo{{1, collationEnv.DefaultConnectionCharset(), true}}, pkCols: []int{1}, selectPks: []int{1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default(), false}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collations.Default(), false}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{1, collationEnv.DefaultConnectionCharset(), false}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{1, collationEnv.DefaultConnectionCharset(), false}}, collationEnv), + collationEnv: collationEnv, }, }, { input: &binlogdatapb.Rule{ @@ -269,8 +280,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}, pkCols: []int{0, 1}, selectPks: []int{0, 1}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}, {1, collations.Unknown, true}}, collationEnv), + collationEnv: collationEnv, }, }, { // in_keyrange @@ -287,8 +299,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, 
collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collationEnv, }, }, { // in_keyrange on RHS of AND. @@ -306,8 +319,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collationEnv, }, }, { // in_keyrange on LHS of AND. 
@@ -325,8 +339,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collations.MySQL8(), }, }, { // in_keyrange on cascaded AND expression @@ -344,8 +359,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collations.MySQL8(), }, }, { // in_keyrange parenthesized @@ -363,8 +379,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collations.MySQL8(), }, }, { // group by @@ -381,8 +398,9 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, 
selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collations.MySQL8(), }, }, { // aggregations @@ -401,13 +419,15 @@ func TestVDiffPlanSuccess(t *testing.T) { selectPks: []int{0}, sourcePrimitive: &engine.OrderedAggregate{ Aggregates: []*engine.AggregateParams{ - engine.NewAggregateParam(opcode.AggregateSum, 2, ""), - engine.NewAggregateParam(opcode.AggregateSum, 3, ""), + engine.NewAggregateParam(opcode.AggregateSum, 2, "", collationEnv), + engine.NewAggregateParam(opcode.AggregateSum, 3, "", collationEnv), }, - GroupByKeys: []*engine.GroupByParams{{KeyCol: 0, WeightStringCol: -1}}, - Input: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + GroupByKeys: []*engine.GroupByParams{{KeyCol: 0, WeightStringCol: -1, CollationEnv: collations.MySQL8()}}, + Input: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + CollationEnv: collations.MySQL8(), }, - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collations.MySQL8(), }, }, { input: &binlogdatapb.Rule{ @@ -423,15 +443,16 @@ func TestVDiffPlanSuccess(t *testing.T) { comparePKs: []compareColInfo{{0, collations.Unknown, true}}, pkCols: []int{0}, selectPks: []int{0}, - sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), - targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}), + sourcePrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + 
targetPrimitive: newMergeSorter(nil, []compareColInfo{{0, collations.Unknown, true}}, collationEnv), + collationEnv: collations.MySQL8(), }, }} for _, tcase := range testcases { t.Run(tcase.input.Filter, func(t *testing.T) { filter := &binlogdatapb.Filter{Rules: []*binlogdatapb.Rule{tcase.input}} - df := &vdiff{sourceTimeZone: tcase.sourceTimeZone, targetTimeZone: "UTC"} + df := &vdiff{sourceTimeZone: tcase.sourceTimeZone, targetTimeZone: "UTC", collationEnv: collations.MySQL8()} err := df.buildVDiffPlan(context.Background(), filter, schm, nil) require.NoError(t, err, tcase.input) require.Equal(t, 1, len(df.differs), tcase.input) @@ -486,7 +507,7 @@ func TestVDiffPlanFailure(t *testing.T) { }} for _, tcase := range testcases { filter := &binlogdatapb.Filter{Rules: []*binlogdatapb.Rule{tcase.input}} - df := &vdiff{} + df := &vdiff{collationEnv: collations.MySQL8()} err := df.buildVDiffPlan(context.Background(), filter, schm, nil) assert.EqualError(t, err, tcase.err, tcase.input) } @@ -1079,7 +1100,7 @@ func TestVDiffFindPKs(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { - _, err := findPKs(tc.table, tc.targetSelect, tc.tdIn) + _, err := findPKs(tc.table, tc.targetSelect, tc.tdIn, collations.MySQL8(), sqlparser.NewTestParser()) require.NoError(t, err) require.EqualValues(t, tc.tdOut, tc.tdIn) }) @@ -1139,7 +1160,7 @@ func TestVDiffPlanInclude(t *testing.T) { } func TestGetColumnCollations(t *testing.T) { - collationEnv := collations.Local() + collationEnv := collations.MySQL8() tests := []struct { name string table *tabletmanagerdatapb.TableDefinition @@ -1160,7 +1181,7 @@ func TestGetColumnCollations(t *testing.T) { }, want: map[string]collations.ID{ "c1": collations.Unknown, - "name": collations.Default(), + "name": collationEnv.DefaultConnectionCharset(), }, }, { @@ -1169,8 +1190,8 @@ func TestGetColumnCollations(t *testing.T) { Schema: "create table t1 (c1 varchar(10), name varchar(10), primary key(c1))", }, want: 
map[string]collations.ID{ - "c1": collations.Default(), - "name": collations.Default(), + "c1": collationEnv.DefaultConnectionCharset(), + "name": collationEnv.DefaultConnectionCharset(), }, }, { @@ -1180,7 +1201,7 @@ func TestGetColumnCollations(t *testing.T) { }, want: map[string]collations.ID{ "c1": collations.Unknown, - "name": collations.Default(), + "name": collationEnv.DefaultConnectionCharset(), }, }, { @@ -1237,7 +1258,7 @@ func TestGetColumnCollations(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - got, err := getColumnCollations(tt.table) + got, err := getColumnCollations(tt.table, collationEnv, sqlparser.NewTestParser()) if (err != nil) != tt.wantErr { t.Errorf("getColumnCollations() error = %v, wantErr = %t", err, tt.wantErr) return diff --git a/go/vt/wrangler/vexec.go b/go/vt/wrangler/vexec.go index c705ad3f6f1..41f1eaa9396 100644 --- a/go/vt/wrangler/vexec.go +++ b/go/vt/wrangler/vexec.go @@ -275,7 +275,7 @@ func (vx *vexec) execCallback(callback func(context.Context, *topo.TabletInfo) ( // parseQuery parses the input query func (vx *vexec) parseQuery() (err error) { - if vx.stmt, err = sqlparser.Parse(vx.query); err != nil { + if vx.stmt, err = vx.wr.parser.Parse(vx.query); err != nil { return err } if vx.tableName, err = extractTableName(vx.stmt); err != nil { @@ -835,7 +835,7 @@ func (wr *Wrangler) ListAllWorkflows(ctx context.Context, keyspace string, activ where = " where state <> 'Stopped'" } query := "select distinct workflow from _vt.vreplication" + where - vx := vtctldvexec.NewVExec(keyspace, "", wr.ts, wr.tmc) + vx := vtctldvexec.NewVExec(keyspace, "", wr.ts, wr.tmc, wr.parser) results, err := vx.QueryContext(ctx, query) if err != nil { return nil, err diff --git a/go/vt/wrangler/vexec_plan.go b/go/vt/wrangler/vexec_plan.go index 5b68d9ada5f..6178844c398 100644 --- a/go/vt/wrangler/vexec_plan.go +++ b/go/vt/wrangler/vexec_plan.go @@ -259,7 +259,7 @@ func (vx *vexec) buildUpdatePlan(ctx context.Context, 
planner vexecPlanner, upd } } if templates := plannerParams.updateTemplates; len(templates) > 0 { - match, err := sqlparser.QueryMatchesTemplates(vx.query, templates) + match, err := vx.wr.parser.QueryMatchesTemplates(vx.query, templates) if err != nil { return nil, err } @@ -311,7 +311,7 @@ func (vx *vexec) buildInsertPlan(ctx context.Context, planner vexecPlanner, ins return nil, fmt.Errorf("query not supported by vexec: %s", sqlparser.String(ins)) } if len(templates) > 0 { - match, err := sqlparser.QueryMatchesTemplates(vx.query, templates) + match, err := vx.wr.parser.QueryMatchesTemplates(vx.query, templates) if err != nil { return nil, err } diff --git a/go/vt/wrangler/vexec_test.go b/go/vt/wrangler/vexec_test.go index ead2be6a56f..254d42ee49e 100644 --- a/go/vt/wrangler/vexec_test.go +++ b/go/vt/wrangler/vexec_test.go @@ -27,12 +27,14 @@ import ( "github.com/stretchr/testify/require" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/textutil" "vitess.io/vitess/go/vt/logutil" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" topodatapb "vitess.io/vitess/go/vt/proto/topodata" + "vitess.io/vitess/go/vt/sqlparser" ) func TestVExec(t *testing.T) { @@ -44,7 +46,7 @@ func TestVExec(t *testing.T) { env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, time.Now().Unix()) defer env.close() var logger = logutil.NewMemoryLogger() - wr := New(logger, env.topoServ, env.tmc) + wr := New(logger, env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) vx := newVExec(ctx, workflow, keyspace, query, wr) err := vx.getPrimaries() @@ -189,7 +191,7 @@ func TestWorkflowListStreams(t *testing.T) { env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 1234) defer env.close() logger := logutil.NewMemoryLogger() - wr := New(logger, env.topoServ, env.tmc) + wr := New(logger, env.topoServ, env.tmc, 
collations.MySQL8(), sqlparser.NewTestParser()) _, err := wr.WorkflowAction(ctx, workflow, keyspace, "listall", false, nil) require.NoError(t, err) @@ -365,7 +367,7 @@ func TestWorkflowListAll(t *testing.T) { env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 0) defer env.close() logger := logutil.NewMemoryLogger() - wr := New(logger, env.topoServ, env.tmc) + wr := New(logger, env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) workflows, err := wr.ListAllWorkflows(ctx, keyspace, true) require.Nil(t, err) @@ -386,7 +388,7 @@ func TestVExecValidations(t *testing.T) { env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 0) defer env.close() - wr := New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) + wr := New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) vx := newVExec(ctx, workflow, keyspace, query, wr) @@ -472,7 +474,7 @@ func TestWorkflowUpdate(t *testing.T) { env := newWranglerTestEnv(t, ctx, []string{"0"}, []string{"-80", "80-"}, "", nil, 1234) defer env.close() logger := logutil.NewMemoryLogger() - wr := New(logger, env.topoServ, env.tmc) + wr := New(logger, env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) nullSlice := textutil.SimulatedNullStringSlice // Used to represent a non-provided value nullOnDDL := binlogdatapb.OnDDLAction(textutil.SimulatedNullInt) // Used to represent a non-provided value tests := []struct { diff --git a/go/vt/wrangler/wrangler.go b/go/vt/wrangler/wrangler.go index dbb046a36b3..26332b58bd9 100644 --- a/go/vt/wrangler/wrangler.go +++ b/go/vt/wrangler/wrangler.go @@ -23,8 +23,10 @@ import ( "golang.org/x/sync/semaphore" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" "vitess.io/vitess/go/vt/vttablet/tmclient" @@ 
-57,16 +59,21 @@ type Wrangler struct { VExecFunc func(ctx context.Context, workflow, keyspace, query string, dryRun bool) (map[*topo.TabletInfo]*sqltypes.Result, error) // Limt the number of concurrent background goroutines if needed. sem *semaphore.Weighted + + collationEnv *collations.Environment + parser *sqlparser.Parser } // New creates a new Wrangler object. -func New(logger logutil.Logger, ts *topo.Server, tmc tmclient.TabletManagerClient) *Wrangler { +func New(logger logutil.Logger, ts *topo.Server, tmc tmclient.TabletManagerClient, collationEnv *collations.Environment, parser *sqlparser.Parser) *Wrangler { return &Wrangler{ - logger: logger, - ts: ts, - tmc: tmc, - vtctld: grpcvtctldserver.NewVtctldServer(ts), - sourceTs: ts, + logger: logger, + ts: ts, + tmc: tmc, + vtctld: grpcvtctldserver.NewVtctldServer(ts, parser), + sourceTs: ts, + collationEnv: collationEnv, + parser: parser, } } @@ -74,11 +81,13 @@ func New(logger logutil.Logger, ts *topo.Server, tmc tmclient.TabletManagerClien // in production. func NewTestWrangler(logger logutil.Logger, ts *topo.Server, tmc tmclient.TabletManagerClient) *Wrangler { return &Wrangler{ - logger: logger, - ts: ts, - tmc: tmc, - vtctld: grpcvtctldserver.NewTestVtctldServer(ts, tmc), - sourceTs: ts, + logger: logger, + ts: ts, + tmc: tmc, + vtctld: grpcvtctldserver.NewTestVtctldServer(ts, tmc), + sourceTs: ts, + collationEnv: collations.MySQL8(), + parser: sqlparser.NewTestParser(), } } @@ -109,3 +118,8 @@ func (wr *Wrangler) SetLogger(logger logutil.Logger) { func (wr *Wrangler) Logger() logutil.Logger { return wr.logger } + +// SQLParser returns the parser this wrangler is using. 
+func (wr *Wrangler) SQLParser() *sqlparser.Parser { + return wr.parser +} diff --git a/go/vt/wrangler/wrangler_env_test.go b/go/vt/wrangler/wrangler_env_test.go index 4dd5e342c35..c62a1d1bf50 100644 --- a/go/vt/wrangler/wrangler_env_test.go +++ b/go/vt/wrangler/wrangler_env_test.go @@ -23,10 +23,12 @@ import ( "sync" "testing" + "vitess.io/vitess/go/mysql/collations" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/vttablet/queryservice" @@ -68,7 +70,7 @@ func newWranglerTestEnv(t testing.TB, ctx context.Context, sourceShards, targetS tabletType: topodatapb.TabletType_REPLICA, tmc: newTestWranglerTMClient(), } - env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc) + env.wr = New(logutil.NewConsoleLogger(), env.topoServ, env.tmc, collations.MySQL8(), sqlparser.NewTestParser()) env.tmc.tablets = make(map[int]*testWranglerTablet) // Generate a unique dialer name. 
diff --git a/go/vt/zkctl/zkconf.go b/go/vt/zkctl/zkconf.go index 7361408c3fc..92be0eb492e 100644 --- a/go/vt/zkctl/zkconf.go +++ b/go/vt/zkctl/zkconf.go @@ -104,18 +104,16 @@ func MakeZooCfg(cnfFiles []string, cnf *ZkConfig, header string) (string, error) for _, line := range strings.Split(header, "\n") { fmt.Fprintf(&myTemplateSource, "## %v\n", strings.TrimSpace(line)) } - var dataErr error + for _, path := range cnfFiles { - data, dataErr := os.ReadFile(path) - if dataErr != nil { + data, err := os.ReadFile(path) + if err != nil { continue } + myTemplateSource.WriteString("## " + path + "\n") myTemplateSource.Write(data) } - if dataErr != nil { - return "", dataErr - } myTemplateSource.WriteString("\n") // in case `data` did not end with a newline for _, extra := range cnf.Extra { @@ -126,9 +124,9 @@ func MakeZooCfg(cnfFiles []string, cnf *ZkConfig, header string) (string, error) if err != nil { return "", err } + var cnfData strings.Builder - err = myTemplate.Execute(&cnfData, cnf) - if err != nil { + if err := myTemplate.Execute(&cnfData, cnf); err != nil { return "", err } return cnfData.String(), nil diff --git a/test/config.json b/test/config.json index 7aafcaf1a80..cc68301a052 100644 --- a/test/config.json +++ b/test/config.json @@ -507,7 +507,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_aggregation": { "File": "unused.go", @@ -516,7 +516,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_foundrows": { "File": "unused.go", @@ -525,7 +525,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_informationschema": { "File": "unused.go", @@ -534,7 +534,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": [] + "Tags": 
["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_misc": { "File": "unused.go", @@ -543,7 +543,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_timeout": { "File": "unused.go", @@ -552,7 +552,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_normalize": { "File": "unused.go", @@ -561,7 +561,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_no_scatter": { "File": "unused.go", @@ -570,7 +570,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_orderby": { "File": "unused.go", @@ -606,7 +606,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_vexplain": { "File": "unused.go", @@ -615,7 +615,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 2, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_reference": { "File": "unused.go", @@ -624,7 +624,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_queries_random": { "File": "unused.go", @@ -633,7 +633,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_kill": { "File": "unused.go", @@ -642,7 +642,7 @@ "Manual": false, "Shard": "vtgate_queries", "RetryMax": 1, - "Tags": [] + "Tags": ["upgrade_downgrade_query_serving_queries"] }, "vtgate_concurrentdml": { "File": "unused.go",