diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_bench.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_bench.yml new file mode 100644 index 00000000000..d44e67aa205 --- /dev/null +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_bench.yml @@ -0,0 +1,175 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (onlineddl_vrepl_bench) +on: [push, pull_request] +concurrency: + group: format('{0}-{1}', ${{ github.ref }}, 'Cluster (onlineddl_vrepl_bench)') + cancel-in-progress: true + +permissions: read-all + +env: + LAUNCHABLE_ORGANIZATION: "vitess" + LAUNCHABLE_WORKSPACE: "vitess-app" + GITHUB_PR_HEAD_SHA: "${{ github.event.pull_request.head.sha }}" + +jobs: + build: + name: Run endtoend tests on Cluster (onlineddl_vrepl_bench) + runs-on: gh-hosted-runners-16cores-1-24.04 + + steps: + - name: Skip CI + run: | + if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then + echo "skipping CI due to the 'Skip CI' label" + exit 1 + fi + + - name: Check if workflow needs to be skipped + id: skip-workflow + run: | + skip='false' + if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! 
"${{github.ref}}" =~ "refs/tags/.*" ]]; then + skip='true' + fi + echo Skip ${skip} + echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT + + PR_DATA=$(curl -s\ + -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.pull_request.number }}") + draft=$(echo "$PR_DATA" | jq .draft -r) + echo "is_draft=${draft}" >> $GITHUB_OUTPUT + + - name: Check out code + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Check for changes in relevant files + if: steps.skip-workflow.outputs.skip-workflow == 'false' + uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a # v3.0.1 + id: changes + with: + token: '' + filters: | + end_to_end: + - 'go/**/*.go' + - 'go/vt/sidecardb/**/*.sql' + - 'go/test/endtoend/onlineddl/vrepl_suite/**' + - 'test.go' + - 'Makefile' + - 'build.env' + - 'go.sum' + - 'go.mod' + - 'proto/*.proto' + - 'tools/**' + - 'config/**' + - 'bootstrap.sh' + - '.github/workflows/cluster_endtoend_onlineddl_vrepl_bench.yml' + - 'go/test/endtoend/onlineddl/vrepl_suite/testdata' + + - name: Set up Go + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2 + with: + go-version-file: go.mod + + - name: Set up python + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + + - name: Tune the OS + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + # Limit local port range to not use ports that overlap with server side + # ports that we listen on. 
+ sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535" + # Increase the asynchronous non-blocking I/O. More information at https://dev.mysql.com/doc/refman/5.7/en/innodb-parameters.html#sysvar_innodb_use_native_aio + echo "fs.aio-max-nr = 1048576" | sudo tee -a /etc/sysctl.conf + sudo sysctl -p /etc/sysctl.conf + + - name: Get dependencies + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + run: | + + # Get key to latest MySQL repo + sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C + # Setup MySQL 8.0 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.33-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* + sudo apt-get -qq update + + # We have to install this old version of libaio1 in case we end up testing with MySQL 5.7. See also: + # https://bugs.launchpad.net/ubuntu/+source/libaio/+bug/2067501 + curl -L -O http://mirrors.kernel.org/ubuntu/pool/main/liba/libaio/libaio1_0.3.112-13build1_amd64.deb + sudo dpkg -i libaio1_0.3.112-13build1_amd64.deb + # libtinfo5 is also needed for older MySQL 5.7 builds. 
+ curl -L -O http://mirrors.kernel.org/ubuntu/pool/universe/n/ncurses/libtinfo5_6.3-2ubuntu0.1_amd64.deb + sudo dpkg -i libtinfo5_6.3-2ubuntu0.1_amd64.deb + + # Install everything else we need, and configure + sudo apt-get -qq install -y mysql-server mysql-shell mysql-client make unzip g++ etcd-client etcd-server curl git wget eatmydata xz-utils libncurses6 + + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + # install JUnit report formatter + go install github.com/vitessio/go-junit-report@HEAD + + - name: Setup launchable dependencies + if: steps.skip-workflow.outputs.is_draft == 'false' && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && github.base_ref == 'main' + run: | + # Get Launchable CLI installed. If you can, make it a part of the builder image to speed things up + pip3 install --user launchable~=1.0 > /dev/null + + # verify that launchable setup is all correct. + launchable verify || true + + # Tell Launchable about the build you are producing and testing + launchable record build --name "$GITHUB_RUN_ID" --no-commit-collection --source . + + - name: Run cluster endtoend test + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' + timeout-minutes: 45 + run: | + # We set the VTDATAROOT to the /tmp folder to reduce the file path of mysql.sock file + # which musn't be more than 107 characters long. 
+ export VTDATAROOT="/tmp/" + source build.env + + set -exo pipefail + + cat <<-EOF>>./config/mycnf/mysql8026.cnf + binlog-transaction-compression=ON + EOF + + cat <<-EOF>>./config/mycnf/mysql8026.cnf + binlog-row-value-options=PARTIAL_JSON + EOF + + # run the tests however you normally do, then produce a JUnit XML file + eatmydata -- go run test.go -docker=false -follow -shard onlineddl_vrepl_bench | tee -a output.txt | go-junit-report -set-exit-code > report.xml + + - name: Print test output and Record test result in launchable if PR is not a draft + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + run: | + if [[ "${{steps.skip-workflow.outputs.is_draft}}" == "false" ]]; then + # send recorded tests to launchable + launchable record tests --build "$GITHUB_RUN_ID" go-test . || true + fi + + # print test output + cat output.txt + + - name: Test Summary + if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true' && always() + uses: test-summary/action@31493c76ec9e7aa675f1585d3ed6f1da69269a86 # v2.4 + with: + paths: "report.xml" + show: "fail" diff --git a/go/mysql/binlog_event.go b/go/mysql/binlog_event.go index 90c2d7b9668..979f7537ada 100644 --- a/go/mysql/binlog_event.go +++ b/go/mysql/binlog_event.go @@ -111,7 +111,7 @@ type BinlogEvent interface { // GTID returns the GTID from the event, and if this event // also serves as a BEGIN statement. // This is only valid if IsGTID() returns true. - GTID(BinlogFormat) (replication.GTID, bool, error) + GTID(BinlogFormat) (gtid replication.GTID, hasBegin bool, lastCommitted int64, sequenceNumber int64, err error) // Query returns a Query struct representing data from a QUERY_EVENT. // This is only valid if IsQuery() returns true. 
Query(BinlogFormat) (Query, error) diff --git a/go/mysql/binlog_event_filepos.go b/go/mysql/binlog_event_filepos.go index 8c60956faf1..d32a8c6d2c5 100644 --- a/go/mysql/binlog_event_filepos.go +++ b/go/mysql/binlog_event_filepos.go @@ -40,8 +40,8 @@ func newFilePosBinlogEvent(buf []byte) *filePosBinlogEvent { return &filePosBinlogEvent{binlogEvent: binlogEvent(buf)} } -func (*filePosBinlogEvent) GTID(BinlogFormat) (replication.GTID, bool, error) { - return nil, false, nil +func (*filePosBinlogEvent) GTID(BinlogFormat) (replication.GTID, bool, int64, int64, error) { + return nil, false, 0, 0, nil } // IsSemiSyncAckRequested implements BinlogEvent.IsSemiSyncAckRequested(). @@ -224,8 +224,8 @@ func (ev filePosFakeEvent) Format() (BinlogFormat, error) { return BinlogFormat{}, nil } -func (ev filePosFakeEvent) GTID(BinlogFormat) (replication.GTID, bool, error) { - return nil, false, nil +func (ev filePosFakeEvent) GTID(BinlogFormat) (replication.GTID, bool, int64, int64, error) { + return nil, false, 0, 0, nil } func (ev filePosFakeEvent) Query(BinlogFormat) (Query, error) { @@ -304,6 +304,6 @@ func (ev filePosGTIDEvent) StripChecksum(f BinlogFormat) (BinlogEvent, []byte, e return ev, nil, nil } -func (ev filePosGTIDEvent) GTID(BinlogFormat) (replication.GTID, bool, error) { - return ev.gtid, false, nil +func (ev filePosGTIDEvent) GTID(BinlogFormat) (replication.GTID, bool, int64, int64, error) { + return ev.gtid, false, 0, 0, nil } diff --git a/go/mysql/binlog_event_make_test.go b/go/mysql/binlog_event_make_test.go index 84535213cd9..9eef1a7108e 100644 --- a/go/mysql/binlog_event_make_test.go +++ b/go/mysql/binlog_event_make_test.go @@ -159,7 +159,7 @@ func TestMariadDBGTIDEVent(t *testing.T) { event, _, err := event.StripChecksum(f) require.NoError(t, err, "StripChecksum failed: %v", err) - gtid, hasBegin, err := event.GTID(f) + gtid, hasBegin, _, _, err := event.GTID(f) require.NoError(t, err, "NewMariaDBGTIDEvent().GTID() returned error: %v", err) require.True(t, 
hasBegin, "NewMariaDBGTIDEvent() didn't store hasBegin properly.") @@ -178,7 +178,7 @@ func TestMariadDBGTIDEVent(t *testing.T) { event, _, err = event.StripChecksum(f) require.NoError(t, err, "StripChecksum failed: %v", err) - gtid, hasBegin, err = event.GTID(f) + gtid, hasBegin, _, _, err = event.GTID(f) require.NoError(t, err, "NewMariaDBGTIDEvent().GTID() returned error: %v", err) require.False(t, hasBegin, "NewMariaDBGTIDEvent() didn't store hasBegin properly.") diff --git a/go/mysql/binlog_event_mariadb.go b/go/mysql/binlog_event_mariadb.go index f2c0ec8f369..f35edc0d9c7 100644 --- a/go/mysql/binlog_event_mariadb.go +++ b/go/mysql/binlog_event_mariadb.go @@ -60,17 +60,18 @@ func (ev mariadbBinlogEvent) IsGTID() bool { // 8 sequence number // 4 domain ID // 1 flags2 -func (ev mariadbBinlogEvent) GTID(f BinlogFormat) (replication.GTID, bool, error) { +func (ev mariadbBinlogEvent) GTID(f BinlogFormat) (replication.GTID, bool, int64, int64, error) { const FLStandalone = 1 data := ev.Bytes()[f.HeaderLength:] flags2 := data[8+4] - return replication.MariadbGTID{ + gtid := replication.MariadbGTID{ Sequence: binary.LittleEndian.Uint64(data[:8]), Domain: binary.LittleEndian.Uint32(data[8 : 8+4]), Server: ev.ServerID(), - }, flags2&FLStandalone == 0, nil + } + return gtid, flags2&FLStandalone == 0, 0, 0, nil } // PreviousGTIDs implements BinlogEvent.PreviousGTIDs(). 
diff --git a/go/mysql/binlog_event_mariadb_test.go b/go/mysql/binlog_event_mariadb_test.go index c4eeac39c38..91a9b833602 100644 --- a/go/mysql/binlog_event_mariadb_test.go +++ b/go/mysql/binlog_event_mariadb_test.go @@ -74,7 +74,7 @@ func TestMariadbNotBeginGTID(t *testing.T) { input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbStandaloneGTIDEvent)} want := false - if _, got, err := input.GTID(f); got != want { + if _, got, _, _, err := input.GTID(f); got != want { t.Errorf("%#v.GTID() = %v (%v), want %v", input, got, err, want) } } @@ -88,7 +88,7 @@ func TestMariadbIsBeginGTID(t *testing.T) { input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbBeginGTIDEvent)} want := true - if _, got, err := input.GTID(f); got != want { + if _, got, _, _, err := input.GTID(f); got != want { t.Errorf("%#v.IsBeginGTID() = %v (%v), want %v", input, got, err, want) } } @@ -102,7 +102,7 @@ func TestMariadbStandaloneBinlogEventGTID(t *testing.T) { input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbStandaloneGTIDEvent)} want := replication.MariadbGTID{Domain: 0, Server: 62344, Sequence: 9} - got, hasBegin, err := input.GTID(f) + got, hasBegin, _, _, err := input.GTID(f) assert.NoError(t, err, "unexpected error: %v", err) assert.False(t, hasBegin, "unexpected hasBegin") assert.True(t, reflect.DeepEqual(got, want), "%#v.GTID() = %#v, want %#v", input, got, want) @@ -118,7 +118,7 @@ func TestMariadbBinlogEventGTID(t *testing.T) { input := mariadbBinlogEvent{binlogEvent: binlogEvent(mariadbBeginGTIDEvent)} want := replication.MariadbGTID{Domain: 0, Server: 62344, Sequence: 10} - got, hasBegin, err := input.GTID(f) + got, hasBegin, _, _, err := input.GTID(f) assert.NoError(t, err, "unexpected error: %v", err) assert.True(t, hasBegin, "unexpected !hasBegin") assert.True(t, reflect.DeepEqual(got, want), "%#v.GTID() = %#v, want %#v", input, got, want) diff --git a/go/mysql/binlog_event_mysql56.go b/go/mysql/binlog_event_mysql56.go index 3f931310ba9..7529df664ac 100644 
--- a/go/mysql/binlog_event_mysql56.go +++ b/go/mysql/binlog_event_mysql56.go @@ -60,12 +60,27 @@ func (ev mysql56BinlogEvent) IsGTID() bool { // 1 flags // 16 SID (server UUID) // 8 GNO (sequence number, signed int) -func (ev mysql56BinlogEvent) GTID(f BinlogFormat) (replication.GTID, bool, error) { +// 1 lt_type +// 8 last_committed +// 8 sequence_number +func (ev mysql56BinlogEvent) GTID(f BinlogFormat) (gtid replication.GTID, hasBegin bool, lastCommitted int64, sequenceNumber int64, err error) { data := ev.Bytes()[f.HeaderLength:] var sid replication.SID - copy(sid[:], data[1:1+16]) - gno := int64(binary.LittleEndian.Uint64(data[1+16 : 1+16+8])) - return replication.Mysql56GTID{Server: sid, Sequence: gno}, false /* hasBegin */, nil + pos := 1 + copy(sid[:], data[pos:pos+16]) + pos += 16 // end of SID + gno := int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + pos += 8 // end of GNO + pos += 1 // end of lt_type + if len(data) >= pos+8 { + lastCommitted = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + } + pos += 8 // end of last_committed + if len(data) >= pos+8 { + sequenceNumber = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + } + // pos += 8 // end of sequence_number + return replication.Mysql56GTID{Server: sid, Sequence: gno}, false /* hasBegin */, lastCommitted, sequenceNumber, nil } // PreviousGTIDs implements BinlogEvent.PreviousGTIDs(). diff --git a/go/mysql/binlog_event_mysql56_test.go b/go/mysql/binlog_event_mysql56_test.go index ede2abece99..8a6a6edf1aa 100644 --- a/go/mysql/binlog_event_mysql56_test.go +++ b/go/mysql/binlog_event_mysql56_test.go @@ -34,7 +34,7 @@ import ( // Sample event data for MySQL 5.6. 
var ( mysql56FormatEvent = NewMysql56BinlogEvent([]byte{0x78, 0x4e, 0x49, 0x55, 0xf, 0x64, 0x0, 0x0, 0x0, 0x74, 0x0, 0x0, 0x0, 0x78, 0x0, 0x0, 0x0, 0x1, 0x0, 0x4, 0x0, 0x35, 0x2e, 0x36, 0x2e, 0x32, 0x34, 0x2d, 0x6c, 0x6f, 0x67, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x78, 0x4e, 0x49, 0x55, 0x13, 0x38, 0xd, 0x0, 0x8, 0x0, 0x12, 0x0, 0x4, 0x4, 0x4, 0x4, 0x12, 0x0, 0x0, 0x5c, 0x0, 0x4, 0x1a, 0x8, 0x0, 0x0, 0x0, 0x8, 0x8, 0x8, 0x2, 0x0, 0x0, 0x0, 0xa, 0xa, 0xa, 0x19, 0x19, 0x0, 0x1, 0x18, 0x4a, 0xf, 0xca}) - mysql56GTIDEvent = NewMysql56BinlogEvent([]byte{0xff, 0x4e, 0x49, 0x55, 0x21, 0x64, 0x0, 0x0, 0x0, 0x30, 0x0, 0x0, 0x0, 0xf5, 0x2, 0x0, 0x0, 0x0, 0x0, 0x1, 0x43, 0x91, 0x92, 0xbd, 0xf3, 0x7c, 0x11, 0xe4, 0xbb, 0xeb, 0x2, 0x42, 0xac, 0x11, 0x3, 0x5a, 0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x48, 0x45, 0x82, 0x27}) + mysql56GTIDEvent = NewMysql56BinlogEvent([]byte{0xff, 0x4e, 0x49, 0x55, 0x21, 0x64, 0x0, 0x0, 0x0, 0x30, 0x0, 0x0, 0x0, 0xf5, 0x2, 0x0, 0x0, 0x0, 0x0, 0x1, 0x43, 0x91, 0x92, 0xbd, 0xf3, 0x7c, 0x11, 0xe4, 0xbb, 0xeb, 0x2, 0x42, 0xac, 0x11, 0x3, 0x5a, 0x4, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 /* lt_type: */, 0x0 /* last_committed: */, 0x7, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 /* sequence_number: */, 0x9, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x48, 0x45, 0x82, 0x27}) mysql56QueryEvent = NewMysql56BinlogEvent([]byte{0xff, 0x4e, 0x49, 0x55, 0x2, 0x64, 0x0, 0x0, 0x0, 0x77, 0x0, 0x0, 0x0, 0xdb, 0x3, 0x0, 0x0, 0x0, 0x0, 0x3d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x21, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6, 0x3, 0x73, 0x74, 0x64, 0x4, 0x8, 0x0, 0x8, 0x0, 0x21, 0x0, 0xc, 0x1, 0x74, 0x65, 0x73, 0x74, 0x0, 0x74, 0x65, 0x73, 0x74, 0x0, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 
0x65, 0x20, 0x28, 0x6d, 0x73, 0x67, 0x29, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x20, 0x28, 0x27, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x27, 0x29, 0x92, 0x12, 0x79, 0xc3}) mysql56SemiSyncNoAckQueryEvent = NewMysql56BinlogEvent([]byte{0xef, 0x00, 0xff, 0x4e, 0x49, 0x55, 0x2, 0x64, 0x0, 0x0, 0x0, 0x77, 0x0, 0x0, 0x0, 0xdb, 0x3, 0x0, 0x0, 0x0, 0x0, 0x3d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x21, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6, 0x3, 0x73, 0x74, 0x64, 0x4, 0x8, 0x0, 0x8, 0x0, 0x21, 0x0, 0xc, 0x1, 0x74, 0x65, 0x73, 0x74, 0x0, 0x74, 0x65, 0x73, 0x74, 0x0, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x28, 0x6d, 0x73, 0x67, 0x29, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x20, 0x28, 0x27, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x27, 0x29, 0x92, 0x12, 0x79, 0xc3}) mysql56SemiSyncAckQueryEvent = NewMysql56BinlogEvent([]byte{0xef, 0x01, 0xff, 0x4e, 0x49, 0x55, 0x2, 0x64, 0x0, 0x0, 0x0, 0x77, 0x0, 0x0, 0x0, 0xdb, 0x3, 0x0, 0x0, 0x0, 0x0, 0x3d, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x4, 0x0, 0x0, 0x21, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x20, 0x0, 0x0, 0x0, 0x0, 0x0, 0x6, 0x3, 0x73, 0x74, 0x64, 0x4, 0x8, 0x0, 0x8, 0x0, 0x21, 0x0, 0xc, 0x1, 0x74, 0x65, 0x73, 0x74, 0x0, 0x74, 0x65, 0x73, 0x74, 0x0, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x28, 0x6d, 0x73, 0x67, 0x29, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x20, 0x28, 0x27, 0x68, 0x65, 0x6c, 0x6c, 0x6f, 0x27, 0x29, 0x92, 0x12, 0x79, 0xc3}) @@ -90,10 +90,12 @@ func TestMysql56GTID(t *testing.T) { Server: replication.SID{0x43, 0x91, 0x92, 0xbd, 0xf3, 0x7c, 0x11, 0xe4, 0xbb, 0xeb, 0x2, 0x42, 0xac, 0x11, 0x3, 0x5a}, Sequence: 4, } - got, hasBegin, err := input.GTID(format) + got, hasBegin, lastCommitted, sequenceNumber, err := input.GTID(format) require.NoError(t, err, "GTID() error: 
%v", err) assert.False(t, hasBegin, "GTID() returned hasBegin") assert.Equal(t, want, got, "GTID() = %#v, want %#v", got, want) + assert.Equal(t, int64(7), lastCommitted) + assert.Equal(t, int64(9), sequenceNumber) } func TestMysql56DecodeTransactionPayload(t *testing.T) { diff --git a/go/mysql/endtoend/replication_test.go b/go/mysql/endtoend/replication_test.go index a04f75c6b43..4c0e3f5864a 100644 --- a/go/mysql/endtoend/replication_test.go +++ b/go/mysql/endtoend/replication_test.go @@ -230,7 +230,7 @@ func TestRowReplicationWithRealDatabase(t *testing.T) { switch { case be.IsGTID(): // We expect one of these at least. - gtid, hasBegin, err := be.GTID(f) + gtid, hasBegin, _, _, err := be.GTID(f) if err != nil { t.Fatalf("GTID event is broken: %v", err) } diff --git a/go/mysql/replication/mysql56_gtid.go b/go/mysql/replication/mysql56_gtid.go index dd55caf1949..7dd8c81e4b0 100644 --- a/go/mysql/replication/mysql56_gtid.go +++ b/go/mysql/replication/mysql56_gtid.go @@ -94,8 +94,7 @@ func ParseSID(s string) (sid SID, err error) { type Mysql56GTID struct { // Server is the SID of the server that originally committed the transaction. Server SID - // Sequence is the sequence number of the transaction within a given Server's - // scope. + // Sequence is the sequence number of the transaction within a given Server's scope. 
Sequence int64 } diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go index e052762cd13..41acd0dea4e 100644 --- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go +++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go @@ -21,7 +21,6 @@ import ( "flag" "fmt" "math/rand/v2" - "net/http" "os" "path" "strings" @@ -33,6 +32,7 @@ import ( "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/mysql/capabilities" "vitess.io/vitess/go/vt/log" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" "vitess.io/vitess/go/vt/schema" "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" @@ -207,7 +207,7 @@ func TestRevertSchemaChanges(t *testing.T) { require.Equal(t, 1, len(shards)) throttler.EnableLagThrottlerAndWaitForStatus(t, clusterInstance) - throttler.WaitForCheckThrottlerResult(t, clusterInstance, primaryTablet, throttlerapp.TestingName, nil, http.StatusOK, time.Minute) + throttler.WaitForCheckThrottlerResult(t, clusterInstance, primaryTablet, throttlerapp.TestingName, nil, tabletmanagerdatapb.CheckThrottlerResponseCode_OK, time.Minute) t.Run("revertible", testRevertible) t.Run("revert", testRevert) diff --git a/go/test/endtoend/onlineddl/vrepl_bench/onlineddl_vrepl_mini_bench_test.go b/go/test/endtoend/onlineddl/vrepl_bench/onlineddl_vrepl_mini_bench_test.go new file mode 100644 index 00000000000..e3d3f21eb47 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_bench/onlineddl_vrepl_mini_bench_test.go @@ -0,0 +1,779 @@ +/* +Copyright 2019 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplstress + +import ( + "context" + "flag" + "fmt" + "math/rand/v2" + "os" + "path" + "runtime" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/onlineddl" + "vitess.io/vitess/go/test/endtoend/throttler" + "vitess.io/vitess/go/vt/log" + tabletmanagerdatapb "vitess.io/vitess/go/vt/proto/tabletmanagerdata" + "vitess.io/vitess/go/vt/schema" + vttablet "vitess.io/vitess/go/vt/vttablet/common" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" +) + +type WriteMetrics struct { + mu sync.Mutex + insertsAttempts, insertsFailures, insertsNoops, inserts int64 + updatesAttempts, updatesFailures, updatesNoops, updates int64 + deletesAttempts, deletesFailures, deletesNoops, deletes int64 +} + +func (w *WriteMetrics) Clear() { + w.mu.Lock() + defer w.mu.Unlock() + + w.inserts = 0 + w.updates = 0 + w.deletes = 0 + + w.insertsAttempts = 0 + w.insertsFailures = 0 + w.insertsNoops = 0 + + w.updatesAttempts = 0 + w.updatesFailures = 0 + w.updatesNoops = 0 + + w.deletesAttempts = 0 + w.deletesFailures = 0 + w.deletesNoops = 0 +} + +func (w *WriteMetrics) String() string { + return fmt.Sprintf(`WriteMetrics: inserts-deletes=%d, updates-deletes=%d, +insertsAttempts=%d, insertsFailures=%d, insertsNoops=%d, inserts=%d, +updatesAttempts=%d, updatesFailures=%d, updatesNoops=%d, updates=%d, +deletesAttempts=%d, deletesFailures=%d, deletesNoops=%d, deletes=%d, 
+`, + w.inserts-w.deletes, w.updates-w.deletes, + w.insertsAttempts, w.insertsFailures, w.insertsNoops, w.inserts, + w.updatesAttempts, w.updatesFailures, w.updatesNoops, w.updates, + w.deletesAttempts, w.deletesFailures, w.deletesNoops, w.deletes, + ) +} + +var ( + clusterInstance *cluster.LocalProcessCluster + shards []cluster.Shard + vtParams mysql.ConnParams + primaryTablet *cluster.Vttablet + replicaTablet *cluster.Vttablet + throttlerCheckOK atomic.Bool + + opOrder int64 + opOrderMutex sync.Mutex + hostname = "localhost" + keyspaceName = "ks" + cell = "zone1" + schemaChangeDirectory = "" + tableName = `stress_test` + cleanupStatements = []string{ + `DROP TABLE IF EXISTS stress_test`, + `DROP TABLE IF EXISTS t1`, + } + createStatement = ` + CREATE TABLE stress_test ( + id bigint(20) not null, + rand_val varchar(32) null default '', + op_order bigint unsigned not null default 0, + hint_col varchar(64) not null default '', + created_timestamp timestamp not null default current_timestamp, + updates int unsigned not null default 0, + PRIMARY KEY (id), + key created_idx(created_timestamp), + key updates_idx(updates) + ) ENGINE=InnoDB; + CREATE TABLE t1 ( + id bigint(20) not null, + i int not null default 0, + PRIMARY KEY (id) + ) ENGINE=InnoDB; + ` + alterHintStatement = ` + ALTER TABLE stress_test modify hint_col varchar(64) not null default '%s' + ` + insertRowStatement = ` + INSERT IGNORE INTO stress_test (id, rand_val, op_order) VALUES (%d, left(md5(rand()), 8), %d) + ` + updateRowStatement = ` + UPDATE stress_test SET op_order=%d, updates=updates+1 WHERE id=%d + ` + deleteRowStatement = ` + DELETE FROM stress_test WHERE id=%d AND updates=1 + ` + selectMaxOpOrder = ` + SELECT MAX(op_order) as m FROM stress_test + ` + // We use CAST(SUM(updates) AS SIGNED) because SUM() returns a DECIMAL datatype, and we want to read a SIGNED INTEGER type + selectCountRowsStatement = ` + SELECT COUNT(*) AS num_rows, CAST(SUM(updates) AS SIGNED) AS sum_updates FROM stress_test + 
` + truncateStatement = ` + TRUNCATE TABLE stress_test + ` + writeMetrics WriteMetrics +) + +var ( + countIterations = 5 +) + +const ( + maxTableRows = 4096 + workloadDuration = 45 * time.Second + migrationWaitTimeout = 60 * time.Second +) + +func resetOpOrder() { + opOrderMutex.Lock() + defer opOrderMutex.Unlock() + opOrder = 0 +} + +func nextOpOrder() int64 { + opOrderMutex.Lock() + defer opOrderMutex.Unlock() + opOrder++ + return opOrder +} + +func TestMain(m *testing.M) { + flag.Parse() + + exitcode, err := func() (int, error) { + clusterInstance = cluster.NewCluster(cell, hostname) + schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID())) + defer os.RemoveAll(schemaChangeDirectory) + defer clusterInstance.Teardown() + + if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) { + _ = os.Mkdir(schemaChangeDirectory, 0700) + } + + clusterInstance.VtctldExtraArgs = []string{ + "--schema_change_dir", schemaChangeDirectory, + "--schema_change_controller", "local", + "--schema_change_check_interval", "1s", + } + + clusterInstance.VtTabletExtraArgs = []string{ + "--heartbeat_interval", "250ms", + "--heartbeat_on_demand_duration", fmt.Sprintf("%v", migrationWaitTimeout*2), + "--migration_check_interval", "2s", + "--watch_replication_stream", + // Test VPlayer batching mode. 
+ fmt.Sprintf("--vreplication_experimental_flags=%d", + // vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts), + // vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerParallel), + // vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching), + vttablet.VReplicationExperimentalFlagAllowNoBlobBinlogRowImage|vttablet.VReplicationExperimentalFlagOptimizeInserts|vttablet.VReplicationExperimentalFlagVPlayerBatching|vttablet.VReplicationExperimentalFlagVPlayerParallel), + } + clusterInstance.VtGateExtraArgs = []string{ + "--ddl_strategy", "online", + } + + if err := clusterInstance.StartTopo(); err != nil { + return 1, err + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + } + + // No need for replicas in this stress test + if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 0, false); err != nil { + return 1, err + } + + vtgateInstance := clusterInstance.NewVtgateInstance() + // Start vtgate + if err := vtgateInstance.Setup(); err != nil { + return 1, err + } + // ensure it is torn down during cluster TearDown + clusterInstance.VtgateProcess = *vtgateInstance + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + + primaryTablet = clusterInstance.Keyspaces[0].Shards[0].Vttablets[0] + if len(clusterInstance.Keyspaces[0].Shards[0].Vttablets) > 1 { + replicaTablet = clusterInstance.Keyspaces[0].Shards[0].Vttablets[1] + } + return m.Run(), nil + }() + if err != nil { + fmt.Printf("%v\n", err) + os.Exit(1) + } else { + os.Exit(exitcode) + } + +} + +func TestVreplMiniStressSchemaChanges(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + shards = 
clusterInstance.Keyspaces[0].Shards + require.Equal(t, 1, len(shards)) + + throttler.EnableLagThrottlerAndWaitForStatus(t, clusterInstance) + + t.Run("validate config on primary", func(t *testing.T) { + // Validate the config + conn, err := primaryTablet.VttabletProcess.TabletConn(keyspaceName, true) + require.NoError(t, err) + defer conn.Close() + + _, err = conn.ExecuteFetch("set @@global.binlog_transaction_dependency_tracking='WRITESET'", 1, true) + require.NoError(t, err) + + { + rs, err := conn.ExecuteFetch("select @@global.binlog_transaction_dependency_tracking as v", 1, true) + require.NoError(t, err) + row := rs.Named().Row() + require.NotNil(t, row) + t.Logf("binlog_transaction_dependency_tracking: %v", row.AsString("v", "")) + } + _, err = conn.ExecuteFetch("set @@global.replica_preserve_commit_order=1", 1, true) + require.NoError(t, err) + }) + t.Run("validate config on replica", func(t *testing.T) { + if replicaTablet == nil { + t.SkipNow() + } + // Validate the config + conn, err := replicaTablet.VttabletProcess.TabletConn(keyspaceName, true) + require.NoError(t, err) + defer conn.Close() + { + rs, err := conn.ExecuteFetch("select @@global.replica_parallel_workers as val", 1, true) + require.NoError(t, err) + row := rs.Named().Row() + require.NotNil(t, row) + parallelWorkers := row.AsInt64("val", 0) + require.Positive(t, parallelWorkers) + t.Logf("replica_parallel_workers: %v", parallelWorkers) + } + { + rs, err := conn.ExecuteFetch("select @@global.replica_preserve_commit_order as val", 1, true) + require.NoError(t, err) + row := rs.Named().Row() + require.NotNil(t, row) + preserveCommitOrder := row.AsInt64("val", 0) + require.Positive(t, preserveCommitOrder) + t.Logf("replica_preserve_commit_order: %v", preserveCommitOrder) + } + _, err = conn.ExecuteFetch("set @@global.binlog_transaction_dependency_tracking='WRITESET'", 1, true) + require.NoError(t, err) + }) + + throttlerCheckOK.Store(true) + + results := []time.Duration{} + for i := 0; i < 
countIterations; i++ { + // Finally, this is the real test: + // We populate a table, and begin a concurrent workload (this is the "mini stress") + // We then ALTER TABLE via vreplication. + // Once convinced ALTER TABLE is complete, we stop the workload. + // We then compare expected metrics with table metrics. If they agree, then + // the vreplication/ALTER TABLE did not corrupt our data and we are happy. + testName := fmt.Sprintf("ALTER TABLE with workload %d/%d", (i + 1), countIterations) + t.Run(testName, func(t *testing.T) { + t.Run("create schema", func(t *testing.T) { + testWithInitialSchema(t) + }) + t.Run("init table", func(t *testing.T) { + initTable(t) + }) + + var uuid string + t.Run("start migration", func(t *testing.T) { + hint := fmt.Sprintf("hint-alter-with-workload-%d", i) + uuid = testOnlineDDLStatement(t, fmt.Sprintf(alterHintStatement, hint), "online --postpone-completion --force-cut-over-after=1ns", "", true) + }) + t.Run("wait for ready_to_complete", func(t *testing.T) { + waitForReadyToComplete(t, uuid, true) + }) + t.Run("throttle online-ddl", func(t *testing.T) { + onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, false) + onlineddl.ThrottleAllMigrations(t, &vtParams) + onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, true) + throttler.WaitForCheckThrottlerResult(t, clusterInstance, primaryTablet, throttlerapp.OnlineDDLName, nil, tabletmanagerdatapb.CheckThrottlerResponseCode_APP_DENIED, time.Minute) + }) + readPos := func(t *testing.T) { + { + rs, err := primaryTablet.VttabletProcess.QueryTablet("select @@global.gtid_executed", keyspaceName, true) + require.NoError(t, err) + t.Logf("gtid executed: %v", rs.Rows[0][0].ToString()) + } + { + query := fmt.Sprintf("select pos from _vt.vreplication where workflow='%s'", uuid) + rs, err := primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.NoError(t, err) + require.NotEmpty(t, rs.Rows) + t.Logf("vreplication pos: %v", 
rs.Rows[0][0].ToString()) + } + } + t.Run("read pos", func(t *testing.T) { + readPos(t) + }) + t.Run("mark for completion", func(t *testing.T) { + onlineddl.CheckCompleteAllMigrations(t, &vtParams, 1) + }) + t.Run(fmt.Sprintf("start workload: %v", workloadDuration), func(t *testing.T) { + onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, true) + ctx, cancel := context.WithTimeout(ctx, workloadDuration) + defer cancel() + runMultipleConnections(ctx, t) + }) + t.Run("read pos", func(t *testing.T) { + readPos(t) + }) + t.Run("validate throttler at end of workload", func(t *testing.T) { + onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, true) + throttler.WaitForCheckThrottlerResult(t, clusterInstance, primaryTablet, throttlerapp.OnlineDDLName, nil, tabletmanagerdatapb.CheckThrottlerResponseCode_APP_DENIED, time.Second) + }) + var startTime = time.Now() + t.Run("unthrottle online-ddl", func(t *testing.T) { + onlineddl.UnthrottleAllMigrations(t, &vtParams) + if !onlineddl.CheckThrottledApps(t, &vtParams, throttlerapp.OnlineDDLName, false) { + status, err := throttler.GetThrottlerStatus(&clusterInstance.VtctldClientProcess, primaryTablet) + assert.NoError(t, err) + + t.Logf("Throttler status: %+v", status) + } + }) + t.Run("read pos", func(t *testing.T) { + readPos(t) + }) + t.Run("wait for migration to complete", func(t *testing.T) { + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, migrationWaitTimeout, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + t.Logf("# Migration status (for debug purposes): <%s>", status) + if !onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) { + query := fmt.Sprintf("select * from _vt.vreplication where workflow='%s'", uuid) + rs, err := primaryTablet.VttabletProcess.QueryTablet(query, keyspaceName, false) + require.NoError(t, err) + require.NotEmpty(t, rs.Rows) + t.Logf("vreplication: %v", rs.Rows[0]) + } + + 
onlineddl.CheckCancelAllMigrations(t, &vtParams, -1) + }) + t.Run("read pos", func(t *testing.T) { + readPos(t) + }) + endTime := time.Now() + results = append(results, endTime.Sub(startTime)) + t.Logf(":::::::::::::::::::: Workload catchup took %v ::::::::::::::::::::", endTime.Sub(startTime)) + t.Run("cleanup", func(t *testing.T) { + throttler.WaitForCheckThrottlerResult(t, clusterInstance, primaryTablet, throttlerapp.OnlineDDLName, nil, tabletmanagerdatapb.CheckThrottlerResponseCode_OK, time.Minute) + }) + t.Run("validate metrics", func(t *testing.T) { + testSelectTableMetrics(t) + }) + }) + } + + t.Run("summary", func(t *testing.T) { + t.Logf(":::::::::::::::::::: Workload catchup took: %+v ::::::::::::::::::::", results) + }) +} + +func testWithInitialSchema(t *testing.T) { + for _, statement := range cleanupStatements { + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, statement) + require.NoError(t, err) + } + // Create the stress table + err := clusterInstance.VtctldClientProcess.ApplySchema(keyspaceName, createStatement) + require.NoError(t, err) + + // Check if table is created + checkTable(t, tableName) +} + +func waitForReadyToComplete(t *testing.T, uuid string, expected bool) bool { + ctx, cancel := context.WithTimeout(context.Background(), migrationWaitTimeout) + defer cancel() + + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + for { + rs := onlineddl.ReadMigrations(t, &vtParams, uuid) + require.NotNil(t, rs) + for _, row := range rs.Named().Rows { + readyToComplete := row.AsInt64("ready_to_complete", 0) + if expected == (readyToComplete > 0) { + // all good. 
This is what we waited for + if expected { + // if migration is ready to complete, the timestamp should be non-null + assert.False(t, row["ready_to_complete_timestamp"].IsNull()) + } else { + assert.True(t, row["ready_to_complete_timestamp"].IsNull()) + } + return true + } + } + select { + case <-ticker.C: + case <-ctx.Done(): + assert.NoError(t, ctx.Err(), "timeout waiting for ready_to_complete") + return false + } + } +} + +// testOnlineDDLStatement runs an online DDL, ALTER statement +func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, expectHint string, skipWait bool) (uuid string) { + row := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, alterStatement, "").Named().Row() + require.NotNil(t, row) + uuid = row.AsString("uuid", "") + uuid = strings.TrimSpace(uuid) + require.NotEmpty(t, uuid) + t.Logf("# Generated UUID (for debug purposes):") + t.Logf("<%s>", uuid) + + strategySetting, err := schema.ParseDDLStrategy(ddlStrategy) + assert.NoError(t, err) + + if !strategySetting.Strategy.IsDirect() && !skipWait && uuid != "" { + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, migrationWaitTimeout, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + t.Logf("# Migration status (for debug purposes): <%s>", status) + } + + if expectHint != "" { + checkMigratedTable(t, tableName, expectHint) + } + return uuid +} + +// checkTable checks the number of tables in the first two shards. 
+func checkTable(t *testing.T, showTableName string) { + for i := range clusterInstance.Keyspaces[0].Shards { + checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], showTableName, 1) + } +} + +// checkTablesCount checks the number of tables in the given tablet +func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) { + query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName) + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + ticker := time.NewTicker(time.Second) + defer ticker.Stop() + + rowcount := 0 + + for { + queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.NoError(t, err) + rowcount = len(queryResult.Rows) + if rowcount > 0 { + break + } + + select { + case <-ticker.C: + continue // Keep looping + case <-ctx.Done(): + // Break below to the assertion + } + + break + } + + assert.Equal(t, expectCount, rowcount) +} + +// checkMigratedTables checks the CREATE STATEMENT of a table after migration +func checkMigratedTable(t *testing.T, tableName, expectHint string) { + for i := range clusterInstance.Keyspaces[0].Shards { + createStatement := getCreateTableStatement(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], tableName) + assert.Contains(t, createStatement, expectHint) + } +} + +// getCreateTableStatement returns the CREATE TABLE statement for a given table +func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) { + queryResult, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s;", tableName), keyspaceName, true) + require.NoError(t, err) + + assert.Equal(t, len(queryResult.Rows), 1) + assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement + statement = queryResult.Rows[0][1].ToString() + return statement +} + +func generateInsert(t *testing.T, conn *mysql.Conn) error { + id := 
rand.Int32N(int32(maxTableRows)) + query := fmt.Sprintf(insertRowStatement, id, nextOpOrder()) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + writeMetrics.insertsAttempts++ + if err != nil { + writeMetrics.insertsFailures++ + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics.insertsNoops++ + return + } + writeMetrics.inserts++ + }() + { + id := rand.Int32N(int32(maxTableRows)) + query := fmt.Sprintf("insert into t1 values (%d, %d)", id, id) + _, _ = conn.ExecuteFetch(query, 1000, true) + } + return err +} + +func generateUpdate(t *testing.T, conn *mysql.Conn) error { + id := rand.Int32N(int32(maxTableRows)) + query := fmt.Sprintf(updateRowStatement, nextOpOrder(), id) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + writeMetrics.updatesAttempts++ + if err != nil { + writeMetrics.updatesFailures++ + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics.updatesNoops++ + return + } + writeMetrics.updates++ + }() + { + id := rand.Int32N(int32(maxTableRows)) + query := fmt.Sprintf("update t1 set i=i+1 where id=%d", id) + _, _ = conn.ExecuteFetch(query, 1000, true) + } + return err +} + +func generateDelete(t *testing.T, conn *mysql.Conn) error { + id := rand.Int32N(int32(maxTableRows)) + query := fmt.Sprintf(deleteRowStatement, id) + qr, err := conn.ExecuteFetch(query, 1000, true) + + func() { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + writeMetrics.deletesAttempts++ + if err != nil { + writeMetrics.deletesFailures++ + return + } + assert.Less(t, qr.RowsAffected, uint64(2)) + if qr.RowsAffected == 0 { + writeMetrics.deletesNoops++ + return + } + writeMetrics.deletes++ + }() + { + id := rand.Int32N(int32(maxTableRows)) + query := fmt.Sprintf("delete from t1 where id=%d", id) + _, _ = conn.ExecuteFetch(query, 
1000, true) + } + return err +} + +func runSingleConnection(ctx context.Context, t *testing.T, sleepInterval time.Duration) { + log.Infof("Running single connection") + + // conn, err := mysql.Connect(ctx, &vtParams) + conn, err := primaryTablet.VttabletProcess.TabletConn(keyspaceName, true) + require.NoError(t, err) + defer conn.Close() + + _, err = conn.ExecuteFetch("set innodb_lock_wait_timeout=5", 1000, true) + require.NoError(t, err) + _, err = conn.ExecuteFetch("set autocommit=1", 1000, true) + require.NoError(t, err) + _, err = conn.ExecuteFetch("set transaction isolation level read committed", 1000, true) + require.NoError(t, err) + + ticker := time.NewTicker(sleepInterval) + defer ticker.Stop() + + log.Infof("+- Starting single connection") + defer log.Infof("+- Terminating single connection") + for { + select { + case <-ctx.Done(): + log.Infof("runSingleConnection context timeout") + return + case <-ticker.C: + } + if !throttlerCheckOK.Load() { + continue + } + switch rand.Int32N(3) { + case 0: + err = generateInsert(t, conn) + case 1: + err = generateUpdate(t, conn) + case 2: + err = generateDelete(t, conn) + } + assert.Nil(t, err) + } +} + +func runMultipleConnections(ctx context.Context, t *testing.T) { + // The workload for a 16 vCPU machine is: + // - Concurrency of 16 + // - 2ms interval between queries for each connection + // As the number of vCPUs decreases, so do we decrease concurrency, and increase intervals. For example, on a 8 vCPU machine + // we run concurrency of 8 and interval of 4ms. On a 4 vCPU machine we run concurrency of 4 and interval of 8ms. 
+ maxConcurrency := runtime.NumCPU() + sleepModifier := 16.0 / float64(maxConcurrency) + baseSleepInterval := 2 * time.Millisecond + singleConnectionSleepIntervalNanoseconds := float64(baseSleepInterval.Nanoseconds()) * sleepModifier + sleepInterval := time.Duration(int64(singleConnectionSleepIntervalNanoseconds)) + + log.Infof("Running multiple connections: maxConcurrency=%v, sleep interval=%v", maxConcurrency, sleepInterval) + var wg sync.WaitGroup + for i := 0; i < maxConcurrency; i++ { + wg.Add(1) + go func() { + defer wg.Done() + runSingleConnection(ctx, t, sleepInterval) + }() + } + flushBinlogs := func() { + // Flushing binlogs on primary restarts imposes more challenges to parallel vplayer + // because a binlog rotation is a parallellism barrier: all events from previous binlog + // must be consumed before starting to apply events in new binlog. + conn, err := primaryTablet.VttabletProcess.TabletConn(keyspaceName, true) + require.NoError(t, err) + defer conn.Close() + + _, err = conn.ExecuteFetch("flush binary logs", 1000, true) + require.NoError(t, err) + } + time.AfterFunc(200*time.Millisecond, flushBinlogs) + time.AfterFunc(400*time.Millisecond, flushBinlogs) + wg.Wait() + log.Infof("Running multiple connections: done") +} + +func initTable(t *testing.T) { + log.Infof("initTable begin") + defer log.Infof("initTable complete") + + t.Run("cancel pending migrations", func(t *testing.T) { + cancelQuery := "alter vitess_migration cancel all" + r := onlineddl.VtgateExecQuery(t, &vtParams, cancelQuery, "") + if r.RowsAffected > 0 { + fmt.Printf("# Cancelled migrations (for debug purposes): %d\n", r.RowsAffected) + } + }) + + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + resetOpOrder() + writeMetrics.Clear() + _, err = conn.ExecuteFetch(truncateStatement, 1000, true) + require.NoError(t, err) + + for i := 0; i < maxTableRows/2; i++ { + generateInsert(t, conn) + } + for i := 0; i < 
maxTableRows/4; i++ { + generateUpdate(t, conn) + } + for i := 0; i < maxTableRows/4; i++ { + generateDelete(t, conn) + } +} + +func testSelectTableMetrics(t *testing.T) { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + { + rs := onlineddl.VtgateExecQuery(t, &vtParams, selectMaxOpOrder, "") + row := rs.Named().Row() + require.NotNil(t, row) + + maxOpOrder := row.AsInt64("m", 0) + fmt.Printf("# max op_order in table: %d\n", maxOpOrder) + } + + log.Infof("%s", writeMetrics.String()) + + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + rs, err := conn.ExecuteFetch(selectCountRowsStatement, 1000, true) + require.NoError(t, err) + + row := rs.Named().Row() + require.NotNil(t, row) + log.Infof("testSelectTableMetrics, row: %v", row) + numRows := row.AsInt64("num_rows", 0) + sumUpdates := row.AsInt64("sum_updates", 0) + assert.NotZero(t, numRows) + assert.NotZero(t, sumUpdates) + assert.NotZero(t, writeMetrics.inserts) + assert.NotZero(t, writeMetrics.deletes) + assert.NotZero(t, writeMetrics.updates) + assert.Equal(t, writeMetrics.inserts-writeMetrics.deletes, numRows) + assert.Equal(t, writeMetrics.updates-writeMetrics.deletes, sumUpdates) // because we DELETE WHERE updates=1 +} diff --git a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go index 4a2f7f1a3ce..75b5ee1c9bb 100644 --- a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go +++ b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go @@ -311,10 +311,10 @@ func testSingle(t *testing.T, testName string, fkOnlineDDLPossible bool) { } orderBy := "" if content, exists := readTestFile(t, testName, "order_by"); exists { - orderBy = fmt.Sprintf("order by %s", content) + orderBy = fmt.Sprintf(" order by %s", content) } - selectBefore := fmt.Sprintf("select %s from %s %s", beforeColumns, beforeTableName, orderBy) 
- selectAfter := fmt.Sprintf("select %s from %s %s", afterColumns, afterTableName, orderBy) + selectBefore := fmt.Sprintf("select %s from %s%s", beforeColumns, beforeTableName, orderBy) + selectAfter := fmt.Sprintf("select %s from %s%s", afterColumns, afterTableName, orderBy) selectBeforeFile := onlineddl.CreateTempScript(t, selectBefore) defer os.Remove(selectBeforeFile) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp/create.sql index 99018d8c798..79cab9748bc 100644 --- a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp/create.sql +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp/create.sql @@ -11,6 +11,8 @@ create table onlineddl_test ( key i_idx(i) ) auto_increment=1; +insert into onlineddl_test values (null, 7, null, now(), now(), '2010-10-20 07:20:30', 0); + drop event if exists onlineddl_test; delimiter ;; create event onlineddl_test diff --git a/go/test/endtoend/throttler/util.go b/go/test/endtoend/throttler/util.go index 162388c83e0..d01b9e7d052 100644 --- a/go/test/endtoend/throttler/util.go +++ b/go/test/endtoend/throttler/util.go @@ -485,7 +485,7 @@ func EnableLagThrottlerAndWaitForStatus(t *testing.T, clusterInstance *cluster.L } } -func WaitForCheckThrottlerResult(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet, appName throttlerapp.Name, flags *throttle.CheckFlags, expect int32, timeout time.Duration) (*vtctldatapb.CheckThrottlerResponse, error) { +func WaitForCheckThrottlerResult(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet, appName throttlerapp.Name, flags *throttle.CheckFlags, expect tabletmanagerdatapb.CheckThrottlerResponseCode, timeout time.Duration) (*vtctldatapb.CheckThrottlerResponse, error) { ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() ticker := 
time.NewTicker(time.Second) @@ -493,7 +493,10 @@ func WaitForCheckThrottlerResult(t *testing.T, clusterInstance *cluster.LocalPro for { resp, err := CheckThrottler(clusterInstance, tablet, appName, flags) require.NoError(t, err) - if resp.Check.StatusCode == expect { + if resp.Check.StatusCode == int32(expect) { + return resp, nil + } + if resp.Check.ResponseCode == expect { return resp, nil } select { diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index 08e06ec803c..b737e9b35d1 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -19,6 +19,7 @@ package binlog import ( "bytes" "context" + "errors" "fmt" "io" "strings" @@ -34,6 +35,7 @@ import ( "vitess.io/vitess/go/vt/dbconfigs" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/sqlparser" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/vt/vttablet/tabletserver/schema" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -278,7 +280,7 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog Position: replication.EncodePosition(pos), } if err = bls.sendTransaction(eventToken, statements); err != nil { - if err == io.EOF { + if errors.Is(vterrors.UnwrapAll(err), io.EOF) { return ErrClientEOF } return fmt.Errorf("send reply error: %v", err) @@ -344,7 +346,7 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog switch { case ev.IsPseudo(): - gtid, _, err = ev.GTID(format) + gtid, _, _, _, err = ev.GTID(format) if err != nil { return pos, fmt.Errorf("can't get GTID from binlog event: %v, event data: %#v", err, ev) } @@ -360,7 +362,7 @@ func (bls *Streamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog } case ev.IsGTID(): // GTID_EVENT: update current GTID, maybe BEGIN. 
var hasBegin bool - gtid, hasBegin, err = ev.GTID(format) + gtid, hasBegin, _, _, err = ev.GTID(format) if err != nil { return pos, fmt.Errorf("can't get GTID from binlog event: %v, event data: %#v", err, ev) } diff --git a/go/vt/proto/binlogdata/binlogdata.pb.go b/go/vt/proto/binlogdata/binlogdata.pb.go index e3523b6b384..c0f5990d9f0 100644 --- a/go/vt/proto/binlogdata/binlogdata.pb.go +++ b/go/vt/proto/binlogdata/binlogdata.pb.go @@ -303,6 +303,8 @@ const ( // If a client experiences some disruptions before receiving the event, // the client should restart the copy operation. VEventType_COPY_COMPLETED VEventType = 20 + // Indicates rotation into a new binary log + VEventType_PREVIOUS_GTIDS VEventType = 21 ) // Enum value maps for VEventType. @@ -329,6 +331,7 @@ var ( 18: "LASTPK", 19: "SAVEPOINT", 20: "COPY_COMPLETED", + 21: "PREVIOUS_GTIDS", } VEventType_value = map[string]int32{ "UNKNOWN": 0, @@ -352,6 +355,7 @@ var ( "LASTPK": 18, "SAVEPOINT": 19, "COPY_COMPLETED": 20, + "PREVIOUS_GTIDS": 21, } ) @@ -1913,6 +1917,12 @@ type VEvent struct { Throttled bool `protobuf:"varint,24,opt,name=throttled,proto3" json:"throttled,omitempty"` // ThrottledReason is a human readable string that explains why the stream is throttled ThrottledReason string `protobuf:"bytes,25,opt,name=throttled_reason,json=throttledReason,proto3" json:"throttled_reason,omitempty"` + // For GTID events, the sequence number of the most recent transaction this event depends on / conflicts with. + LastCommitted int64 `protobuf:"varint,26,opt,name=last_committed,json=lastCommitted,proto3" json:"last_committed,omitempty"` + // For GTID events, the sequence number of this transaction. 
+ SequenceNumber int64 `protobuf:"varint,27,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number,omitempty"` + // MustSave is a decoration by VPlayer + MustSave bool `protobuf:"varint,28,opt,name=must_save,json=mustSave,proto3" json:"must_save,omitempty"` } func (x *VEvent) Reset() { @@ -2050,6 +2060,27 @@ func (x *VEvent) GetThrottledReason() string { return "" } +func (x *VEvent) GetLastCommitted() int64 { + if x != nil { + return x.LastCommitted + } + return 0 +} + +func (x *VEvent) GetSequenceNumber() int64 { + if x != nil { + return x.SequenceNumber + } + return 0 +} + +func (x *VEvent) GetMustSave() bool { + if x != nil { + return x.MustSave + } + return false +} + type MinimalTable struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -3321,7 +3352,7 @@ var file_binlogdata_proto_rawDesc = []byte{ 0x69, 0x63, 0x69, 0x70, 0x61, 0x6e, 0x74, 0x73, 0x12, 0x29, 0x0a, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, - 0x6f, 0x77, 0x73, 0x22, 0xb6, 0x04, 0x0a, 0x06, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, + 0x6f, 0x77, 0x73, 0x22, 0xa3, 0x05, 0x0a, 0x06, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x2a, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x16, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, @@ -3356,35 +3387,70 @@ var file_binlogdata_proto_rawDesc = []byte{ 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x19, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x68, 0x72, - 0x6f, 0x74, 
0x74, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0x8d, 0x01, 0x0a, - 0x0c, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, - 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0b, 0x70, 0x5f, 0x6b, 0x5f, 0x63, - 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x03, 0x52, 0x09, 0x70, 0x4b, - 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x5f, 0x6b, 0x5f, 0x69, - 0x6e, 0x64, 0x65, 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x70, 0x4b, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x41, 0x0a, 0x0d, - 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x30, 0x0a, - 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, - 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, - 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, - 0xd9, 0x01, 0x0a, 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x10, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x18, - 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 
0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, - 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfd, 0x02, 0x0a, 0x0e, - 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, + 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, + 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x1a, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, + 0x74, 0x65, 0x64, 0x12, 0x27, 0x0a, 0x0f, 0x73, 0x65, 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x5f, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x65, + 0x71, 0x75, 0x65, 0x6e, 0x63, 0x65, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x75, 0x73, 0x74, 0x5f, 0x73, 0x61, 0x76, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x6d, 0x75, 0x73, 0x74, 0x53, 0x61, 0x76, 0x65, 0x22, 0x8d, 0x01, 0x0a, 0x0c, 0x4d, 0x69, + 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x24, + 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, + 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 
0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x12, 0x1e, 0x0a, 0x0b, 0x70, 0x5f, 0x6b, 0x5f, 0x63, 0x6f, 0x6c, 0x75, + 0x6d, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x03, 0x52, 0x09, 0x70, 0x4b, 0x43, 0x6f, 0x6c, + 0x75, 0x6d, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0e, 0x70, 0x5f, 0x6b, 0x5f, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x70, 0x4b, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x41, 0x0a, 0x0d, 0x4d, 0x69, 0x6e, + 0x69, 0x6d, 0x61, 0x6c, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x30, 0x0a, 0x06, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x62, 0x69, 0x6e, + 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x61, 0x6c, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x06, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x22, 0xd9, 0x01, 0x0a, + 0x0e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, + 0x27, 0x0a, 0x0f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0e, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x5a, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x5f, 0x6f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, 0x65, 0x72, 0x72, + 0x69, 0x64, 0x65, 0x73, 0x1a, 0x42, 0x0a, 0x14, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4f, 0x76, + 0x65, 0x72, 0x72, 0x69, 0x64, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, + 0x6b, 0x65, 0x79, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xfd, 0x02, 0x0a, 0x0e, 0x56, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, + 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, + 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, + 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, + 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x6f, + 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, + 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, + 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, + 0x4b, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, + 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xbb, 0x02, 0x0a, 0x12, 0x56, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, @@ -3395,171 +3461,144 @@ var file_binlogdata_proto_rawDesc = []byte{ 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, - 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x1a, 0x0a, - 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 
0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x06, 0x66, 0x69, 0x6c, - 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x62, 0x69, 0x6e, 0x6c, - 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x52, 0x06, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x0f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, - 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, - 0x73, 0x74, 0x50, 0x4b, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, - 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3d, 0x0a, 0x0f, 0x56, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2a, - 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x45, 0x76, 0x65, - 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x22, 0xbb, 0x02, 0x0a, 0x12, 0x56, - 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, - 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, - 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, - 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 
0x74, 0x65, 0x5f, - 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, - 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, - 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, - 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, - 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, - 0x70, 0x6b, 0x12, 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa4, 0x02, 0x0a, 0x13, 0x56, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x12, 0x12, 0x0a, 
0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, - 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, - 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, - 0x74, 0x74, 0x6c, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, - 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, - 0x65, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, - 0x62, 0x65, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, - 0x64, 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, - 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, - 0xfb, 0x01, 0x0a, 0x14, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, - 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, - 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, - 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, - 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, - 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 
0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, - 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, - 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, - 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, - 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xde, 0x01, - 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, - 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, - 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, - 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, - 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, - 0x77, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, - 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61, - 0x73, 0x74, 0x70, 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 
0x65, - 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, 0x69, - 0x0a, 0x0b, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, - 0x0e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, - 0x74, 0x61, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0b, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1c, 0x0a, 0x09, 0x63, - 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x0b, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, - 0x6b, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, - 0x74, 0x70, 0x6b, 0x22, 0xdc, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, - 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, - 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, - 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, - 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, - 0x65, 0x72, 0x5f, 0x69, 0x64, 
0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, - 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, - 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, - 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, - 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, - 0x72, 0x79, 0x22, 0x72, 0x0a, 0x16, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, - 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, - 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, - 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, - 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x2a, 0x3e, 0x0a, 0x0b, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, - 0x00, 0x12, 0x08, 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x45, - 0x58, 0x45, 0x43, 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x45, 0x43, 0x5f, 0x49, 0x47, - 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x03, 0x2a, 0x7b, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x0f, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x65, 0x72, 
0x69, 0x61, 0x6c, 0x69, 0x7a, - 0x65, 0x10, 0x00, 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x69, - 0x67, 0x72, 0x61, 0x74, 0x65, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x68, 0x61, - 0x72, 0x64, 0x10, 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x44, - 0x4c, 0x10, 0x05, 0x2a, 0x44, 0x0a, 0x1b, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, - 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x74, 0x6f, - 0x6d, 0x69, 0x63, 0x43, 0x6f, 0x70, 0x79, 0x10, 0x02, 0x2a, 0x71, 0x0a, 0x19, 0x56, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, - 0x77, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, - 0x6e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x10, 0x01, 0x12, 0x0b, 0x0a, - 0x07, 0x53, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x6f, - 0x70, 0x79, 0x69, 0x6e, 0x67, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x6e, 0x69, - 0x6e, 0x67, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0x05, 0x12, - 0x0b, 0x0a, 0x07, 0x4c, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x10, 0x06, 0x2a, 0x8d, 0x02, 0x0a, - 0x0a, 0x56, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, - 0x4e, 0x4b, 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x54, 0x49, 0x44, - 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0a, 0x0a, - 0x06, 
0x43, 0x4f, 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, - 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x04, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, 0x05, - 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, - 0x52, 0x45, 0x50, 0x4c, 0x41, 0x43, 0x45, 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, - 0x41, 0x54, 0x45, 0x10, 0x08, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, - 0x09, 0x12, 0x07, 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x54, - 0x48, 0x45, 0x52, 0x10, 0x0b, 0x12, 0x07, 0x0a, 0x03, 0x52, 0x4f, 0x57, 0x10, 0x0c, 0x12, 0x09, - 0x0a, 0x05, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x45, 0x41, - 0x52, 0x54, 0x42, 0x45, 0x41, 0x54, 0x10, 0x0e, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x47, 0x54, 0x49, - 0x44, 0x10, 0x0f, 0x12, 0x0b, 0x0a, 0x07, 0x4a, 0x4f, 0x55, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x10, - 0x12, 0x0b, 0x0a, 0x07, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x11, 0x12, 0x0a, 0x0a, - 0x06, 0x4c, 0x41, 0x53, 0x54, 0x50, 0x4b, 0x10, 0x12, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x41, 0x56, - 0x45, 0x50, 0x4f, 0x49, 0x4e, 0x54, 0x10, 0x13, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x50, 0x59, - 0x5f, 0x43, 0x4f, 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x14, 0x2a, 0x27, 0x0a, 0x0d, - 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, - 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, 0x41, - 0x52, 0x44, 0x53, 0x10, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2e, - 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 
0x0a, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x12, + 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x56, 0x53, + 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xa4, 0x02, 0x0a, 0x13, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, + 0x6d, 0x52, 0x6f, 0x77, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, + 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, + 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, + 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, + 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, + 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, + 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, + 0x61, 0x73, 0x74, 0x70, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, + 0x65, 0x64, 0x18, 0x06, 0x20, 
0x01, 0x28, 0x08, 0x52, 0x09, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, + 0x6c, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, + 0x74, 0x12, 0x29, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x5f, 0x72, + 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x74, 0x68, 0x72, + 0x6f, 0x74, 0x74, 0x6c, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x22, 0xfb, 0x01, 0x0a, + 0x14, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, 0x43, 0x61, 0x6c, 0x6c, 0x65, + 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x43, 0x61, + 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, + 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x56, 0x54, 0x47, 0x61, + 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x69, 0x6d, 0x6d, 0x65, + 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x25, 0x0a, + 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0d, 0x2e, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x06, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x12, 0x34, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xde, 0x01, 0x0a, 0x15, 0x56, + 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, + 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, 0x28, 0x0a, 0x08, 0x70, 0x6b, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, + 0x65, 0x72, 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x08, 0x70, 0x6b, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x67, 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, + 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, + 0x77, 0x52, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x12, 0x22, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, + 0x6b, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, + 0x52, 0x6f, 0x77, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x22, 0x69, 0x0a, 0x0b, 0x4c, + 0x61, 0x73, 0x74, 0x50, 0x4b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x0e, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x70, 0x5f, 0x6b, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x52, 0x0b, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x70, + 0x6c, 
0x65, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, 0x6f, 0x6d, + 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x22, 0x58, 0x0a, 0x0b, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4c, + 0x61, 0x73, 0x74, 0x50, 0x4b, 0x12, 0x1d, 0x0a, 0x0a, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x51, 0x75, 0x65, + 0x72, 0x79, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x06, 0x6c, 0x61, 0x73, 0x74, 0x70, 0x6b, + 0x22, 0xdc, 0x01, 0x0a, 0x15, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x13, 0x65, 0x66, + 0x66, 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x76, 0x74, 0x72, 0x70, 0x63, 0x2e, + 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, 0x11, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x64, 0x12, 0x45, 0x0a, 0x13, 0x69, + 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x5f, 0x63, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, + 0x2e, 0x56, 0x54, 0x47, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, 0x49, 0x44, 0x52, + 0x11, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x43, 0x61, 0x6c, 0x6c, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x25, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0d, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 
0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x22, + 0x72, 0x0a, 0x16, 0x56, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x24, 0x0a, 0x06, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0c, 0x2e, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x52, 0x06, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x12, + 0x12, 0x0a, 0x04, 0x67, 0x74, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x67, + 0x74, 0x69, 0x64, 0x12, 0x1e, 0x0a, 0x04, 0x72, 0x6f, 0x77, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x0a, 0x2e, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x52, 0x6f, 0x77, 0x52, 0x04, 0x72, + 0x6f, 0x77, 0x73, 0x2a, 0x3e, 0x0a, 0x0b, 0x4f, 0x6e, 0x44, 0x44, 0x4c, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x49, 0x47, 0x4e, 0x4f, 0x52, 0x45, 0x10, 0x00, 0x12, 0x08, + 0x0a, 0x04, 0x53, 0x54, 0x4f, 0x50, 0x10, 0x01, 0x12, 0x08, 0x0a, 0x04, 0x45, 0x58, 0x45, 0x43, + 0x10, 0x02, 0x12, 0x0f, 0x0a, 0x0b, 0x45, 0x58, 0x45, 0x43, 0x5f, 0x49, 0x47, 0x4e, 0x4f, 0x52, + 0x45, 0x10, 0x03, 0x2a, 0x7b, 0x0a, 0x18, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x0f, 0x0a, 0x0b, 0x4d, 0x61, 0x74, 0x65, 0x72, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x10, 0x00, + 0x12, 0x0e, 0x0a, 0x0a, 0x4d, 0x6f, 0x76, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x10, 0x01, + 0x12, 0x15, 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, + 0x49, 0x6e, 0x64, 0x65, 0x78, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x4d, 0x69, 0x67, 0x72, 0x61, + 0x74, 0x65, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x68, 0x61, 0x72, 0x64, 0x10, + 0x04, 0x12, 0x0d, 0x0a, 0x09, 0x4f, 0x6e, 0x6c, 0x69, 0x6e, 0x65, 0x44, 0x44, 0x4c, 0x10, 0x05, + 0x2a, 0x44, 0x0a, 0x1b, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 
0x69, 0x6f, 0x6e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x75, 0x62, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x08, 0x0a, 0x04, 0x4e, 0x6f, 0x6e, 0x65, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x61, 0x72, + 0x74, 0x69, 0x61, 0x6c, 0x10, 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x41, 0x74, 0x6f, 0x6d, 0x69, 0x63, + 0x43, 0x6f, 0x70, 0x79, 0x10, 0x02, 0x2a, 0x71, 0x0a, 0x19, 0x56, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x10, 0x00, + 0x12, 0x08, 0x0a, 0x04, 0x49, 0x6e, 0x69, 0x74, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x74, + 0x6f, 0x70, 0x70, 0x65, 0x64, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x6f, 0x70, 0x79, 0x69, + 0x6e, 0x67, 0x10, 0x03, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x10, + 0x04, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, + 0x4c, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x10, 0x06, 0x2a, 0xa1, 0x02, 0x0a, 0x0a, 0x56, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, 0x4e, + 0x4f, 0x57, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x47, 0x54, 0x49, 0x44, 0x10, 0x01, 0x12, + 0x09, 0x0a, 0x05, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0a, 0x0a, 0x06, 0x43, 0x4f, + 0x4d, 0x4d, 0x49, 0x54, 0x10, 0x03, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, + 0x43, 0x4b, 0x10, 0x04, 0x12, 0x07, 0x0a, 0x03, 0x44, 0x44, 0x4c, 0x10, 0x05, 0x12, 0x0a, 0x0a, + 0x06, 0x49, 0x4e, 0x53, 0x45, 0x52, 0x54, 0x10, 0x06, 0x12, 0x0b, 0x0a, 0x07, 0x52, 0x45, 0x50, + 0x4c, 0x41, 0x43, 0x45, 0x10, 0x07, 0x12, 0x0a, 0x0a, 0x06, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, + 0x10, 0x08, 0x12, 0x0a, 0x0a, 0x06, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x10, 0x09, 0x12, 0x07, + 0x0a, 0x03, 0x53, 0x45, 0x54, 0x10, 0x0a, 0x12, 0x09, 0x0a, 0x05, 0x4f, 0x54, 0x48, 0x45, 0x52, + 0x10, 0x0b, 0x12, 
0x07, 0x0a, 0x03, 0x52, 0x4f, 0x57, 0x10, 0x0c, 0x12, 0x09, 0x0a, 0x05, 0x46, + 0x49, 0x45, 0x4c, 0x44, 0x10, 0x0d, 0x12, 0x0d, 0x0a, 0x09, 0x48, 0x45, 0x41, 0x52, 0x54, 0x42, + 0x45, 0x41, 0x54, 0x10, 0x0e, 0x12, 0x09, 0x0a, 0x05, 0x56, 0x47, 0x54, 0x49, 0x44, 0x10, 0x0f, + 0x12, 0x0b, 0x0a, 0x07, 0x4a, 0x4f, 0x55, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x10, 0x12, 0x0b, 0x0a, + 0x07, 0x56, 0x45, 0x52, 0x53, 0x49, 0x4f, 0x4e, 0x10, 0x11, 0x12, 0x0a, 0x0a, 0x06, 0x4c, 0x41, + 0x53, 0x54, 0x50, 0x4b, 0x10, 0x12, 0x12, 0x0d, 0x0a, 0x09, 0x53, 0x41, 0x56, 0x45, 0x50, 0x4f, + 0x49, 0x4e, 0x54, 0x10, 0x13, 0x12, 0x12, 0x0a, 0x0e, 0x43, 0x4f, 0x50, 0x59, 0x5f, 0x43, 0x4f, + 0x4d, 0x50, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x14, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x52, 0x45, + 0x56, 0x49, 0x4f, 0x55, 0x53, 0x5f, 0x47, 0x54, 0x49, 0x44, 0x53, 0x10, 0x15, 0x2a, 0x27, 0x0a, + 0x0d, 0x4d, 0x69, 0x67, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, + 0x0a, 0x06, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x53, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x53, 0x48, + 0x41, 0x52, 0x44, 0x53, 0x10, 0x01, 0x42, 0x29, 0x5a, 0x27, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, + 0x2e, 0x69, 0x6f, 0x2f, 0x76, 0x69, 0x74, 0x65, 0x73, 0x73, 0x2f, 0x67, 0x6f, 0x2f, 0x76, 0x74, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x62, 0x69, 0x6e, 0x6c, 0x6f, 0x67, 0x64, 0x61, 0x74, + 0x61, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go index 93b378738dd..7cef8b7f983 100644 --- a/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go +++ b/go/vt/proto/binlogdata/binlogdata_vtproto.pb.go @@ -512,6 +512,9 @@ func (m *VEvent) CloneVT() *VEvent { r.Shard = m.Shard r.Throttled = m.Throttled r.ThrottledReason = m.ThrottledReason + r.LastCommitted = m.LastCommitted + r.SequenceNumber = m.SequenceNumber + r.MustSave = m.MustSave if len(m.unknownFields) > 0 { r.unknownFields = make([]byte, len(m.unknownFields)) 
copy(r.unknownFields, m.unknownFields) @@ -2170,6 +2173,32 @@ func (m *VEvent) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i -= len(m.unknownFields) copy(dAtA[i:], m.unknownFields) } + if m.MustSave { + i-- + if m.MustSave { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe0 + } + if m.SequenceNumber != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.SequenceNumber)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.LastCommitted != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.LastCommitted)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd0 + } if len(m.ThrottledReason) > 0 { i -= len(m.ThrottledReason) copy(dAtA[i:], m.ThrottledReason) @@ -3891,6 +3920,15 @@ func (m *VEvent) SizeVT() (n int) { if l > 0 { n += 2 + l + protohelpers.SizeOfVarint(uint64(l)) } + if m.LastCommitted != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.LastCommitted)) + } + if m.SequenceNumber != 0 { + n += 2 + protohelpers.SizeOfVarint(uint64(m.SequenceNumber)) + } + if m.MustSave { + n += 3 + } n += len(m.unknownFields) return n } @@ -8245,6 +8283,64 @@ func (m *VEvent) UnmarshalVT(dAtA []byte) error { } m.ThrottledReason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 26: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LastCommitted", wireType) + } + m.LastCommitted = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LastCommitted |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SequenceNumber", wireType) + } + m.SequenceNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SequenceNumber |= int64(b&0x7F) << shift + if 
b < 0x80 { + break + } + } + case 28: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MustSave", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MustSave = bool(v != 0) default: iNdEx = preIndex skippy, err := protohelpers.Skip(dAtA[iNdEx:]) diff --git a/go/vt/vttablet/common/flags.go b/go/vt/vttablet/common/flags.go index 75e8e58982f..a2df626ec9d 100644 --- a/go/vt/vttablet/common/flags.go +++ b/go/vt/vttablet/common/flags.go @@ -30,6 +30,7 @@ const ( VReplicationExperimentalFlagOptimizeInserts = int64(1) VReplicationExperimentalFlagAllowNoBlobBinlogRowImage = int64(2) VReplicationExperimentalFlagVPlayerBatching = int64(4) + VReplicationExperimentalFlagVPlayerParallel = int64(8) ) var ( diff --git a/go/vt/vttablet/tabletmanager/vreplication/controller.go b/go/vt/vttablet/tabletmanager/vreplication/controller.go index 7067211ff10..e138b96c9be 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/controller.go +++ b/go/vt/vttablet/tabletmanager/vreplication/controller.go @@ -77,6 +77,8 @@ type controller struct { WorkflowConfig *vttablet.VReplicationConfig } +type dbClientGenerator func() (binlogplayer.DBClient, error) + func processWorkflowOptions(params map[string]string) (*vttablet.VReplicationConfig, error) { options, ok := params["options"] if !ok { @@ -296,7 +298,26 @@ func (ct *controller) runBlp(ctx context.Context) (err error) { } defer vsClient.Close(ctx) - vr := newVReplicator(ct.id, ct.source, vsClient, ct.blpStats, dbClient, ct.mysqld, ct.vre, ct.WorkflowConfig) + var dbClients []binlogplayer.DBClient + dbClientGen := func() (binlogplayer.DBClient, error) { + dbClient := ct.dbClientFactory() + if err := dbClient.Connect(); err != nil { + return nil, err + } + if ct.source.Filter != nil { + if err := 
setDBClientSettings(dbClient, ct.WorkflowConfig); err != nil { + return nil, err + } + } + dbClients = append(dbClients, dbClient) + return dbClient, nil + } + defer func() { + for _, dbClient := range dbClients { + dbClient.Close() + } + }() + vr := newVReplicator(ct.id, ct.source, vsClient, ct.blpStats, dbClient, dbClientGen, ct.mysqld, ct.vre, ct.WorkflowConfig) err = vr.Replicate(ctx) ct.lastWorkflowError.Record(err) @@ -309,7 +330,7 @@ func (ct *controller) runBlp(ctx context.Context) (err error) { isUnrecoverableError(err) || !ct.lastWorkflowError.ShouldRetry() { err = vterrors.Wrapf(err, TerminalErrorIndicator) - if errSetState := vr.setState(binlogdatapb.VReplicationWorkflowState_Error, err.Error()); errSetState != nil { + if errSetState := vr.setState(binlogdatapb.VReplicationWorkflowState_Error, nil, err.Error()); errSetState != nil { log.Errorf("INTERNAL: unable to setState() in controller: %v. Could not set error text to: %v.", errSetState, err) return err // yes, err and not errSetState. } diff --git a/go/vt/vttablet/tabletmanager/vreplication/relaylog.go b/go/vt/vttablet/tabletmanager/vreplication/relaylog.go index 058ca29ff78..ffce7fdcef7 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/relaylog.go +++ b/go/vt/vttablet/tabletmanager/vreplication/relaylog.go @@ -103,6 +103,9 @@ func (rl *relayLog) Fetch() ([][]*binlogdatapb.VEvent, error) { } cancelTimer := rl.startFetchTimer() defer cancelTimer() + // TODO(shlomi): should we use: + // for len(rl.items) <= rl.maxItems/2 && !rl.timedout { + // so that we work on larger buffers of events? 
for len(rl.items) == 0 && !rl.timedout { rl.hasItems.Wait() if err := rl.checkDone(); err != nil { diff --git a/go/vt/vttablet/tabletmanager/vreplication/utils_test.go b/go/vt/vttablet/tabletmanager/vreplication/utils_test.go index 15093e299fc..1e3f3e90917 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/utils_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/utils_test.go @@ -162,6 +162,11 @@ func TestIsUnrecoverableError(t *testing.T) { err: sqlerror.NewSQLError(sqlerror.ERErrorDuringCommit, "unknown", "ERROR HY000: Got error 149 - 'Lock deadlock; Retry transaction' during COMMIT"), expected: false, }, + { + name: "Duplicate key from textual error", + err: errors.New("error applying event for table stress_test while processing position 1172a240-bba1-11ef-a3a6-5b208b5d2dca:1-9028: Duplicate entry '681' for key '_vt_vrp_17b345d8bba111ef9a43b27afeff3c84_20241216133004_.PRIMARY' (errno 1062) (sqlstate 23000) during query: insert into _vt_vrp_17b345d8bba111ef9a43b27afeff3c84_20241216133004_(id,rand_val,op_order,hint_col,created_timestamp,updates) values (681,'9b8f6ae6',6657,'','2024-12-16 11:30:09',0)"), + expected: true, + }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { diff --git a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go index e3a12258691..d558fd87ecf 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vcopier.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vcopier.go @@ -242,7 +242,7 @@ func (vc *vcopier) initTablesForCopy(ctx context.Context) error { if _, err := vc.vr.dbClient.Execute(buf.String()); err != nil { return err } - if err := vc.vr.setState(binlogdatapb.VReplicationWorkflowState_Copying, ""); err != nil { + if err := vc.vr.setState(binlogdatapb.VReplicationWorkflowState_Copying, nil, ""); err != nil { return err } vc.vr.insertLog(LogCopyStart, fmt.Sprintf("Copy phase started for %d table(s)", len(plan.TargetTables))) @@ -265,7 +265,7 @@ func 
(vc *vcopier) initTablesForCopy(ctx context.Context) error { } } } else { - if err := vc.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, "There is nothing to replicate"); err != nil { + if err := vc.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, nil, "There is nothing to replicate"); err != nil { return err } } diff --git a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go index 63538be881d..c24ddc2d5c4 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vdbclient.go @@ -44,6 +44,9 @@ type vdbClient struct { batchSize int64 maxBatchSize int64 relayLogMaxItems int + + TemporaryDevTrackingCountCommits int // TODO(shlomi): remove this variable + TemporaryDevTrackingCountBatchedCommits int // TODO(shlomi): remove this variable } func newVDBClient(dbclient binlogplayer.DBClient, stats *binlogplayer.Stats, relayLogMaxItems int) *vdbClient { @@ -81,6 +84,7 @@ func (vc *vdbClient) Commit() error { if err := vc.DBClient.Commit(); err != nil { return err } + vc.TemporaryDevTrackingCountCommits++ vc.InTransaction = false vc.queries = nil vc.batchSize = 0 @@ -98,6 +102,7 @@ func (vc *vdbClient) CommitTrxQueryBatch() error { for _, err := vc.DBClient.ExecuteFetchMulti(queries, -1); err != nil; { return err } + vc.TemporaryDevTrackingCountBatchedCommits++ vc.InTransaction = false vc.queries = nil vc.queriesPos = 0 diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go index 31ab895934c..3ce8a6e6472 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vplayer.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer.go @@ -23,7 +23,8 @@ import ( "io" "math" "strconv" - "strings" + "sync" + "sync/atomic" "time" "vitess.io/vitess/go/mysql/replication" @@ -59,6 +60,7 @@ type vplayer struct { replicatorPlan *ReplicatorPlan tablePlans map[string]*TablePlan + planMu 
sync.Mutex // These are set when creating the VPlayer based on whether the VPlayer // is in batch (stmt and trx) execution mode or not. @@ -66,14 +68,17 @@ type vplayer struct { commit func() error // If the VPlayer is in batch mode, we accumulate each transaction's statements // that are then sent as a single multi-statement protocol request to the database. - batchMode bool + batchMode bool + parallelMode bool - pos replication.Position + pos atomic.Pointer[replication.Position] + lastPosTs int64 + posMu sync.Mutex // unsavedEvent is set any time we skip an event without // saving, which is on an empty commit. // If nothing else happens for idleTimeout since timeLastSaved, // the position of the unsavedEvent gets saved. - unsavedEvent *binlogdatapb.VEvent + unsavedEvent atomic.Pointer[binlogdatapb.VEvent] // timeLastSaved is set every time a GTID is saved. timeLastSaved time.Time // lastTimestampNs is the last timestamp seen so far. @@ -89,16 +94,6 @@ type vplayer struct { phase string throttlerAppName string - - // See updateFKCheck for more details on how the two fields below are used. - - // foreignKeyChecksEnabled is the current state of the foreign key checks for the current session. - // It reflects what we have set the @@session.foreign_key_checks session variable to. - foreignKeyChecksEnabled bool - - // foreignKeyChecksStateInitialized is set to true once we have initialized the foreignKeyChecksEnabled. - // The initialization is done on the first row event that this vplayer sees. - foreignKeyChecksStateInitialized bool } // NoForeignKeyCheckFlagBitmask is the bitmask for the 2nd bit (least significant) of the flags in a binlog row event. 
@@ -133,6 +128,10 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map commitFunc := func() error { return vr.dbClient.Commit() } + parallelMode := false + if vr.workflowConfig.ExperimentalFlags&vttablet.VReplicationExperimentalFlagVPlayerParallel != 0 { + parallelMode = true + } batchMode := false // We only do batching in the running/replicating phase. if len(copyState) == 0 && vr.workflowConfig.ExperimentalFlags&vttablet.VReplicationExperimentalFlagVPlayerBatching != 0 { @@ -168,10 +167,9 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map vr.dbClient.maxBatchSize = maxAllowedPacket } - return &vplayer{ + vp := &vplayer{ vr: vr, startPos: settings.StartPos, - pos: settings.StartPos, stopPos: settings.StopPos, saveStop: saveStop, copyState: copyState, @@ -182,7 +180,10 @@ func newVPlayer(vr *vreplicator, settings binlogplayer.VRSettings, copyState map query: queryFunc, commit: commitFunc, batchMode: batchMode, + parallelMode: parallelMode, } + vp.pos.Store(&settings.StartPos) + return vp } // play is the entry point for playing binlogs. @@ -190,7 +191,7 @@ func (vp *vplayer) play(ctx context.Context) error { if !vp.stopPos.IsZero() && vp.startPos.AtLeast(vp.stopPos) { log.Infof("Stop position %v already reached: %v", vp.startPos, vp.stopPos) if vp.saveStop { - return vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, fmt.Sprintf("Stop position %v already reached: %v", vp.startPos, vp.stopPos)) + return vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, nil, fmt.Sprintf("Stop position %v already reached: %v", vp.startPos, vp.stopPos)) } return nil } @@ -211,48 +212,8 @@ func (vp *vplayer) play(ctx context.Context) error { } } - return vp.fetchAndApply(ctx) -} - -// updateFKCheck updates the @@session.foreign_key_checks variable based on the binlog row event flags. 
-// The function only does it if it has changed to avoid redundant updates, using the cached vplayer.foreignKeyChecksEnabled -// The foreign_key_checks value for a transaction is determined by the 2nd bit (least significant) of the flags: -// - If set (1), foreign key checks are disabled. -// - If unset (0), foreign key checks are enabled. -// updateFKCheck also updates the state for the first row event that this vplayer, and hence the db connection, sees. -func (vp *vplayer) updateFKCheck(ctx context.Context, flags2 uint32) error { - mustUpdate := false - if vp.vr.WorkflowSubType == int32(binlogdatapb.VReplicationWorkflowSubType_AtomicCopy) { - // If this is an atomic copy, we must update the foreign_key_checks state even when the vplayer runs during - // the copy phase, i.e., for catchup and fastforward. - mustUpdate = true - } else if vp.vr.state == binlogdatapb.VReplicationWorkflowState_Running { - // If the vreplication workflow is in Running state, we must update the foreign_key_checks - // state for all workflow types. 
- mustUpdate = true - } - if !mustUpdate { - return nil - } - dbForeignKeyChecksEnabled := true - if flags2&NoForeignKeyCheckFlagBitmask == NoForeignKeyCheckFlagBitmask { - dbForeignKeyChecksEnabled = false - } - - if vp.foreignKeyChecksStateInitialized /* already set earlier */ && - dbForeignKeyChecksEnabled == vp.foreignKeyChecksEnabled /* no change in the state, no need to update */ { - return nil - } - log.Infof("Setting this session's foreign_key_checks to %s", strconv.FormatBool(dbForeignKeyChecksEnabled)) - if _, err := vp.query(ctx, "set @@session.foreign_key_checks="+strconv.FormatBool(dbForeignKeyChecksEnabled)); err != nil { - return fmt.Errorf("failed to set session foreign_key_checks: %w", err) - } - vp.foreignKeyChecksEnabled = dbForeignKeyChecksEnabled - if !vp.foreignKeyChecksStateInitialized { - log.Infof("First foreign_key_checks update to: %s", strconv.FormatBool(dbForeignKeyChecksEnabled)) - vp.foreignKeyChecksStateInitialized = true - } - return nil + err = vp.fetchAndApply(ctx) + return err } // fetchAndApply performs the fetching and application of the binlogs. 
@@ -268,11 +229,23 @@ func (vp *vplayer) updateFKCheck(ctx context.Context, flags2 uint32) error { func (vp *vplayer) fetchAndApply(ctx context.Context) (err error) { log.Infof("Starting VReplication player id: %v, startPos: %v, stop: %v, filter: %v", vp.vr.id, vp.startPos, vp.stopPos, vp.vr.source) + var wg sync.WaitGroup + defer wg.Wait() + ctx, cancel := context.WithCancel(ctx) defer cancel() relay := newRelayLog(ctx, vp.vr.workflowConfig.RelayLogMaxItems, vp.vr.workflowConfig.RelayLogMaxSize) + parallelWorkers := 1 + if vp.parallelMode { + parallelWorkers = defaultParallelWorkersPoolSize + } + parallelPool, err := newParallelWorkersPool(ctx, parallelWorkers, vp.vr.dbClientGen, vp) + if err != nil { + return vterrors.Wrapf(err, "failed to create parallel events buffer") + } + streamErr := make(chan error, 1) go func() { vstreamOptions := &binlogdatapb.VStreamOptions{ @@ -285,8 +258,24 @@ func (vp *vplayer) fetchAndApply(ctx context.Context) (err error) { }() applyErr := make(chan error, 1) + wg.Add(1) go func() { - applyErr <- vp.applyEvents(ctx, relay) + defer wg.Done() + var err error + log.Errorf("======= QQQ applying events. err=%v", err) + err = errors.Join(err, vp.applyEvents(ctx, relay, parallelPool)) + log.Errorf("======= QQQ about to drain. err=%v", err) + err = errors.Join(err, parallelPool.drain(ctx)) + log.Errorf("======= QQQ drain complete. 
err=%v", err) + applyErr <- err + countCommits := 0 + countBatchedCommits := 0 + for _, w := range parallelPool.workers { + countCommits += w.dbClient.TemporaryDevTrackingCountCommits + countBatchedCommits += w.dbClient.TemporaryDevTrackingCountBatchedCommits + } + log.Errorf("======= QQQ maxBatchedCommitsPerWorker: %v, countCommits: %v, countBatchedCommits: %v", parallelPool.maxBatchedCommitsPerWorker, countCommits, countBatchedCommits) + log.Errorf("======= QQQ parallel workers: %v, maxConcurrency: %v", len(parallelPool.workers), parallelPool.maxConcurrency.Load()) }() select { @@ -301,7 +290,7 @@ func (vp *vplayer) fetchAndApply(ctx context.Context) (err error) { // is shutting down and canceled the context, or stop position was reached, // or a journal event was encountered. // If so, we return nil which will cause the controller to not retry. - if err == io.EOF { + if errors.Is(vterrors.UnwrapAll(err), io.EOF) { return nil } return err @@ -319,88 +308,42 @@ func (vp *vplayer) fetchAndApply(ctx context.Context) (err error) { } // If the stream ends normally we have to return an error indicating // that the controller has to retry a different vttablet. 
- if err == nil || err == io.EOF { + if err == nil || errors.Is(vterrors.UnwrapAll(err), io.EOF) { return errors.New("vstream ended") } return err } } -// applyStmtEvent applies an actual DML statement received from the source, directly onto the backend database -func (vp *vplayer) applyStmtEvent(ctx context.Context, event *binlogdatapb.VEvent) error { - sql := event.Statement - if sql == "" { - sql = event.Dml - } - if event.Type == binlogdatapb.VEventType_SAVEPOINT || vp.canAcceptStmtEvents { - start := time.Now() - _, err := vp.query(ctx, sql) - vp.vr.stats.QueryTimings.Record(vp.phase, start) - vp.vr.stats.QueryCount.Add(vp.phase, 1) - return err - } - return fmt.Errorf("filter rules are not supported for SBR replication: %v", vp.vr.source.Filter.GetRules()) -} - -func (vp *vplayer) applyRowEvent(ctx context.Context, rowEvent *binlogdatapb.RowEvent) error { - if err := vp.updateFKCheck(ctx, rowEvent.Flags); err != nil { - return err - } - tplan := vp.tablePlans[rowEvent.TableName] - if tplan == nil { - return fmt.Errorf("unexpected event on table %s", rowEvent.TableName) - } - applyFunc := func(sql string) (*sqltypes.Result, error) { - stats := NewVrLogStats("ROWCHANGE") - start := time.Now() - qr, err := vp.query(ctx, sql) - vp.vr.stats.QueryCount.Add(vp.phase, 1) - vp.vr.stats.QueryTimings.Record(vp.phase, start) - stats.Send(sql) - return qr, err - } - - if vp.batchMode && len(rowEvent.RowChanges) > 1 { - // If we have multiple delete row events for a table with a single PK column - // then we can perform a simple bulk DELETE using an IN clause. - if (rowEvent.RowChanges[0].Before != nil && rowEvent.RowChanges[0].After == nil) && - tplan.MultiDelete != nil { - _, err := tplan.applyBulkDeleteChanges(rowEvent.RowChanges, applyFunc, vp.vr.dbClient.maxBatchSize) - return err - } - // If we're done with the copy phase then we will be replicating all INSERTS - // regardless of the PK value and can use a single INSERT statment with - // multiple VALUES clauses. 
- if len(vp.copyState) == 0 && (rowEvent.RowChanges[0].Before == nil && rowEvent.RowChanges[0].After != nil) { - _, err := tplan.applyBulkInsertChanges(rowEvent.RowChanges, applyFunc, vp.vr.dbClient.maxBatchSize) - return err - } - } - - for _, change := range rowEvent.RowChanges { - if _, err := tplan.applyChange(change, applyFunc); err != nil { - return err - } - } - - return nil -} - // updatePos should get called at a minimum of vreplicationMinimumHeartbeatUpdateInterval. -func (vp *vplayer) updatePos(ctx context.Context, ts int64) (posReached bool, err error) { - update := binlogplayer.GenerateUpdatePos(vp.vr.id, vp.pos, time.Now().Unix(), ts, vp.vr.stats.CopyRowCount.Get(), vp.vr.workflowConfig.StoreCompressedGTID) - if _, err := vp.query(ctx, update); err != nil { +func (vp *vplayer) updatePos( + ctx context.Context, + ts int64, + queryFunc func(ctx context.Context, sql string) (*sqltypes.Result, error), + dbClient *vdbClient, +) (posReached bool, err error) { + vp.posMu.Lock() + defer vp.posMu.Unlock() + + if ts <= vp.lastPosTs { + // Skip if the timestamp is not increasing. 
+ return false, nil + } + vp.lastPosTs = ts + + update := binlogplayer.GenerateUpdatePos(vp.vr.id, *vp.pos.Load(), time.Now().Unix(), ts, vp.vr.stats.CopyRowCount.Get(), vp.vr.workflowConfig.StoreCompressedGTID) + if _, err := queryFunc(ctx, update); err != nil { return false, fmt.Errorf("error %v updating position", err) } vp.numAccumulatedHeartbeats = 0 - vp.unsavedEvent = nil + vp.unsavedEvent.Store(nil) vp.timeLastSaved = time.Now() - vp.vr.stats.SetLastPosition(vp.pos) - posReached = !vp.stopPos.IsZero() && vp.pos.AtLeast(vp.stopPos) + vp.vr.stats.SetLastPosition(*vp.pos.Load()) + posReached = !vp.stopPos.IsZero() && vp.pos.Load().AtLeast(vp.stopPos) if posReached { log.Infof("Stopped at position: %v", vp.stopPos) if vp.saveStop { - if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, fmt.Sprintf("Stopped at position %v", vp.stopPos)); err != nil { + if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, dbClient, fmt.Sprintf("Stopped at position %v", vp.stopPos)); err != nil { return false, err } } @@ -479,9 +422,7 @@ func (vp *vplayer) recordHeartbeat() error { // this from becoming a tight loop. // TODO(sougou): we can look at recognizing self-generated events and find a better // way to handle them. 
-func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { - defer vp.vr.dbClient.Rollback() - +func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog, parallelPool *parallelWorkersPool) error { estimateLag := func() { behind := time.Now().UnixNano() - vp.lastTimestampNs - vp.timeOffsetNs vp.vr.stats.ReplicationLagSeconds.Store(behind / 1e9) @@ -494,10 +435,24 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { defer vp.vr.stats.ReplicationLagSeconds.Store(math.MaxInt64) defer vp.vr.stats.VReplicationLags.Add(strconv.Itoa(int(vp.vr.id)), math.MaxInt64) var lagSecs int64 + var lastSequenceNumber int64 = -1 + firstInBinlog := true + countCommitsPerWorker := 0 + maxBatchedCommitsPerWorker := 0 + batchNextWorkerCommit := false + var pw *parallelWorker + defer func() { + if pw != nil { + pw.applyEvent(ctx, terminateWorkerEvent) + } + }() for { if ctx.Err() != nil { return ctx.Err() } + if parallelPool.posReached.Load() { + return io.EOF + } // Check throttler. if checkResult, ok := vp.vr.vre.throttlerClient.ThrottleCheckOKOrWaitAppName(ctx, throttlerapp.Name(vp.throttlerAppName)); !ok { _ = vp.vr.updateTimeThrottled(throttlerapp.VPlayerName, checkResult.Summary()) @@ -516,8 +471,8 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { // 2. We've been receiving empty events for longer than idleTimeout. // In both cases, now > timeLastSaved. If so, the GTID of the last unsavedEvent // must be saved. 
- if time.Since(vp.timeLastSaved) >= idleTimeout && vp.unsavedEvent != nil { - posReached, err := vp.updatePos(ctx, vp.unsavedEvent.Timestamp) + if time.Since(vp.timeLastSaved) >= idleTimeout && vp.unsavedEvent.Load() != nil { + posReached, err := vp.updatePos(ctx, vp.unsavedEvent.Load().Timestamp, vp.query, nil) if err != nil { return err } @@ -542,45 +497,58 @@ func (vp *vplayer) applyEvents(ctx context.Context, relay *relayLog) error { lagSecs = event.CurrentTime/1e9 - event.Timestamp } } - mustSave := false switch event.Type { + case binlogdatapb.VEventType_PREVIOUS_GTIDS: + lastSequenceNumber = -1 + firstInBinlog = true + batchNextWorkerCommit = false + continue case binlogdatapb.VEventType_COMMIT: // If we've reached the stop position, we must save the current commit // even if it's empty. So, the next applyEvent is invoked with the // mustSave flag. - if !vp.stopPos.IsZero() && vp.pos.AtLeast(vp.stopPos) { - mustSave = true + if !vp.stopPos.IsZero() && vp.pos.Load().AtLeast(vp.stopPos) { + event.MustSave = true break } + countCommitsPerWorker++ // In order to group multiple commits into a single one, we look ahead for // the next commit. If there is one, we skip the current commit, which ends up // applying the next set of events as part of the current transaction. This approach // also handles the case where the last transaction is partial. In that case, // we only group the transactions with commits we've seen so far. 
- if hasAnotherCommit(items, i, j+1) { + if countCommitsPerWorker < parallelPool.maxBatchedCommitsPerWorker && hasAnotherCommit(items, i, j+1) { + batchNextWorkerCommit = true continue } } - if err := vp.applyEvent(ctx, event, mustSave); err != nil { - if err != io.EOF { - vp.vr.stats.ErrorCounts.Add([]string{"Apply"}, 1) - var table, tableLogMsg, gtidLogMsg string - switch { - case event.GetFieldEvent() != nil: - table = event.GetFieldEvent().TableName - case event.GetRowEvent() != nil: - table = event.GetRowEvent().TableName - } - if table != "" { - tableLogMsg = fmt.Sprintf(" for table %s", table) + if event.SequenceNumber > lastSequenceNumber { + if batchNextWorkerCommit { + batchNextWorkerCommit = false + } else { + if pw != nil { + // Worker errors come asynchronously, because of course the worker applies events + // asynchronously. So we just seize periodic opportunities to check for errors. + // This one is a good opportunity. + if err := parallelPool.workersError(); err != nil { + return err + } + // Let the worker know its work is done + pw.applyEvent(ctx, terminateWorkerEvent) + if countCommitsPerWorker > maxBatchedCommitsPerWorker { + maxBatchedCommitsPerWorker = countCommitsPerWorker + } + countCommitsPerWorker = 0 } - pos := getNextPosition(items, i, j+1) - if pos != "" { - gtidLogMsg = fmt.Sprintf(" while processing position %s", pos) + pw, err = parallelPool.availableWorker(ctx, event.LastCommitted, event.SequenceNumber, firstInBinlog) + if err != nil { + return err } - log.Errorf("Error applying event%s%s: %s", tableLogMsg, gtidLogMsg, err.Error()) - err = vterrors.Wrapf(err, "error applying event%s%s", tableLogMsg, gtidLogMsg) + firstInBinlog = false } + lastSequenceNumber = event.SequenceNumber + } + if err := pw.applyEvent(ctx, event); err != nil { return err } } @@ -640,220 +608,3 @@ func getNextPosition(items [][]*binlogdatapb.VEvent, i, j int) string { } return "" } - -func (vp *vplayer) applyEvent(ctx context.Context, event 
*binlogdatapb.VEvent, mustSave bool) error { - stats := NewVrLogStats(event.Type.String()) - switch event.Type { - case binlogdatapb.VEventType_GTID: - pos, err := binlogplayer.DecodePosition(event.Gtid) - if err != nil { - return err - } - vp.pos = pos - // A new position should not be saved until a saveable event occurs. - vp.unsavedEvent = nil - if vp.stopPos.IsZero() { - return nil - } - case binlogdatapb.VEventType_BEGIN: - // No-op: begin is called as needed. - case binlogdatapb.VEventType_COMMIT: - if mustSave { - if err := vp.vr.dbClient.Begin(); err != nil { - return err - } - } - - if !vp.vr.dbClient.InTransaction { - // We're skipping an empty transaction. We may have to save the position on inactivity. - vp.unsavedEvent = event - return nil - } - posReached, err := vp.updatePos(ctx, event.Timestamp) - if err != nil { - return err - } - if err := vp.commit(); err != nil { - return err - } - if posReached { - return io.EOF - } - case binlogdatapb.VEventType_FIELD: - if err := vp.vr.dbClient.Begin(); err != nil { - return err - } - tplan, err := vp.replicatorPlan.buildExecutionPlan(event.FieldEvent) - if err != nil { - return err - } - vp.tablePlans[event.FieldEvent.TableName] = tplan - stats.Send(fmt.Sprintf("%v", event.FieldEvent)) - - case binlogdatapb.VEventType_INSERT, binlogdatapb.VEventType_DELETE, binlogdatapb.VEventType_UPDATE, - binlogdatapb.VEventType_REPLACE, binlogdatapb.VEventType_SAVEPOINT: - // use event.Statement if available, preparing for deprecation in 8.0 - sql := event.Statement - if sql == "" { - sql = event.Dml - } - // If the event is for one of the AWS RDS "special" or pt-table-checksum tables, we skip - if !strings.Contains(sql, " mysql.rds_") && !strings.Contains(sql, " percona.checksums") { - // This is a player using statement based replication - if err := vp.vr.dbClient.Begin(); err != nil { - return err - } - if err := vp.applyStmtEvent(ctx, event); err != nil { - return err - } - stats.Send(sql) - } - case 
binlogdatapb.VEventType_ROW: - // This player is configured for row based replication - if err := vp.vr.dbClient.Begin(); err != nil { - return err - } - if err := vp.applyRowEvent(ctx, event.RowEvent); err != nil { - log.Infof("Error applying row event: %s", err.Error()) - return err - } - // Row event is logged AFTER RowChanges are applied so as to calculate the total elapsed - // time for the Row event. - stats.Send(fmt.Sprintf("%v", event.RowEvent)) - case binlogdatapb.VEventType_OTHER: - if vp.vr.dbClient.InTransaction { - // Unreachable - log.Errorf("internal error: vplayer is in a transaction on event: %v", event) - return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event) - } - // Just update the position. - posReached, err := vp.updatePos(ctx, event.Timestamp) - if err != nil { - return err - } - if posReached { - return io.EOF - } - case binlogdatapb.VEventType_DDL: - if vp.vr.dbClient.InTransaction { - // Unreachable - log.Errorf("internal error: vplayer is in a transaction on event: %v", event) - return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event) - } - vp.vr.stats.DDLEventActions.Add(vp.vr.source.OnDdl.String(), 1) // Record the DDL handling - switch vp.vr.source.OnDdl { - case binlogdatapb.OnDDLAction_IGNORE: - // We still have to update the position. 
- posReached, err := vp.updatePos(ctx, event.Timestamp) - if err != nil { - return err - } - if posReached { - return io.EOF - } - case binlogdatapb.OnDDLAction_STOP: - if err := vp.vr.dbClient.Begin(); err != nil { - return err - } - if _, err := vp.updatePos(ctx, event.Timestamp); err != nil { - return err - } - if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, fmt.Sprintf("Stopped at DDL %s", event.Statement)); err != nil { - return err - } - if err := vp.commit(); err != nil { - return err - } - return io.EOF - case binlogdatapb.OnDDLAction_EXEC: - // It's impossible to save the position transactionally with the statement. - // So, we apply the DDL first, and then save the position. - // Manual intervention may be needed if there is a partial - // failure here. - if _, err := vp.query(ctx, event.Statement); err != nil { - return err - } - stats.Send(fmt.Sprintf("%v", event.Statement)) - posReached, err := vp.updatePos(ctx, event.Timestamp) - if err != nil { - return err - } - if posReached { - return io.EOF - } - case binlogdatapb.OnDDLAction_EXEC_IGNORE: - if _, err := vp.query(ctx, event.Statement); err != nil { - log.Infof("Ignoring error: %v for DDL: %s", err, event.Statement) - } - stats.Send(fmt.Sprintf("%v", event.Statement)) - posReached, err := vp.updatePos(ctx, event.Timestamp) - if err != nil { - return err - } - if posReached { - return io.EOF - } - } - case binlogdatapb.VEventType_JOURNAL: - if vp.vr.dbClient.InTransaction { - // Unreachable - log.Errorf("internal error: vplayer is in a transaction on event: %v", event) - return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event) - } - // Ensure that we don't have a partial set of table matches in the journal. - switch event.Journal.MigrationType { - case binlogdatapb.MigrationType_SHARDS: - // All tables of the source were migrated. So, no validation needed. 
- case binlogdatapb.MigrationType_TABLES: - // Validate that all or none of the tables are in the journal. - jtables := make(map[string]bool) - for _, table := range event.Journal.Tables { - jtables[table] = true - } - found := false - notFound := false - for tableName := range vp.replicatorPlan.TablePlans { - if _, ok := jtables[tableName]; ok { - found = true - } else { - notFound = true - } - } - switch { - case found && notFound: - // Some were found and some were not found. We can't handle this. - if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, "unable to handle journal event: tables were partially matched"); err != nil { - return err - } - return io.EOF - case notFound: - // None were found. Ignore journal. - return nil - } - // All were found. We must register journal. - } - log.Infof("Binlog event registering journal event %+v", event.Journal) - if err := vp.vr.vre.registerJournal(event.Journal, vp.vr.id); err != nil { - if err := vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, err.Error()); err != nil { - return err - } - return io.EOF - } - stats.Send(fmt.Sprintf("%v", event.Journal)) - return io.EOF - case binlogdatapb.VEventType_HEARTBEAT: - if event.Throttled { - if err := vp.vr.updateTimeThrottled(throttlerapp.VStreamerName, event.ThrottledReason); err != nil { - return err - } - } - if !vp.vr.dbClient.InTransaction { - vp.numAccumulatedHeartbeats++ - if err := vp.recordHeartbeat(); err != nil { - return err - } - } - } - - return nil -} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_parallel_pool.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_parallel_pool.go new file mode 100644 index 00000000000..34d88bd3f9e --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_parallel_pool.go @@ -0,0 +1,258 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "context" + "errors" + "io" + "sync" + "sync/atomic" + + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/log" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + "vitess.io/vitess/go/vt/vterrors" +) + +const ( + defaultParallelWorkersPoolSize = 8 + maxBatchedCommitsPerWorker = 10 +) + +type parallelWorkersPool struct { + workers []*parallelWorker + head int // position of head worker + pool chan *parallelWorker + mu sync.Mutex + workerErrors chan error + posReached atomic.Bool + + currentConcurrency atomic.Int64 + maxConcurrency atomic.Int64 + maxBatchedCommitsPerWorker int + + numCommits atomic.Int64 // temporary. 
TODO: remove + + wakeup sync.Cond +} + +func newParallelWorkersPool(ctx context.Context, size int, dbClientGen dbClientGenerator, vp *vplayer) (p *parallelWorkersPool, err error) { + p = &parallelWorkersPool{ + workers: make([]*parallelWorker, size), + pool: make(chan *parallelWorker, size), + workerErrors: make(chan error, size*2), + } + p.wakeup.L = &p.mu + for i := range size { + w := &parallelWorker{ + index: i, + pool: p, + vp: vp, + } + dbClient, err := dbClientGen() + if err != nil { + return nil, err + } + + w.dbClient = newVDBClient(dbClient, vp.vr.stats, 0) + _, err = vp.vr.setSQLMode(ctx, w.dbClient) + if err != nil { + return nil, err + } + if vp.batchMode { + log.Errorf("======= QQQ batchMode") + w.queryFunc = func(ctx context.Context, sql string) (*sqltypes.Result, error) { + if !w.dbClient.InTransaction { // Should be sent down the wire immediately + return w.dbClient.Execute(sql) + } + return nil, w.dbClient.AddQueryToTrxBatch(sql) // Should become part of the trx batch + } + w.dbClient.maxBatchSize = vp.vr.dbClient.maxBatchSize + } else { + w.queryFunc = func(ctx context.Context, sql string) (*sqltypes.Result, error) { + return w.dbClient.ExecuteWithRetry(ctx, sql) + } + } + p.workers[i] = w + p.pool <- w + } + if size > 1 { + p.maxBatchedCommitsPerWorker = maxBatchedCommitsPerWorker + } else { + p.maxBatchedCommitsPerWorker = vp.vr.workflowConfig.RelayLogMaxItems + } + return p, nil +} + +func (p *parallelWorkersPool) drain(ctx context.Context) (err error) { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + terminateWorkers := func() error { + p.mu.Lock() + defer p.mu.Unlock() + for _, w := range p.workers { + if err := w.applyEvent(ctx, terminateWorkerEvent); err != nil { + return err + } + } + return nil + } + if err := terminateWorkers(); err != nil { + return err + } + // Get all workers (ensures they're all idle): + for range len(p.workers) { + _, err := p.availableWorker(ctx, -1, -1, false) + if err != nil { + return vterrors.Wrapf(err, 
"drain aborted") + } + } + return p.workersError() +} + +func (p *parallelWorkersPool) recycleWorker(w *parallelWorker) { + p.pool <- w +} + +func (p *parallelWorkersPool) availableWorker(ctx context.Context, lastCommitted int64, sequenceNumber int64, firstInBinlog bool) (w *parallelWorker, err error) { + select { + case w = <-p.pool: + case err := <-p.workerErrors: + return nil, err + case <-ctx.Done(): + return nil, ctx.Err() + } + events := make(chan *binlogdatapb.VEvent, p.maxBatchedCommitsPerWorker*5) + + p.mu.Lock() + defer p.mu.Unlock() + + w.events = events + w.lastCommitted = lastCommitted + w.sequenceNumber = sequenceNumber + w.isFirstInBinlog = firstInBinlog + // log.Errorf("========== QQQ availableWorker w=%v initialized with seq=%v, lastComm=%v, firstInBinlog=%v", w.index, w.sequenceNumber, w.lastCommitted, firstInBinlog) + + if lastCommitted < 0 { + // Only happens when called by drain(), in which case there is no need for this worker + // to start applying events, nor will this worker be recycled. This is the end of the line. 
+ return w, nil + } + + go func() { + if err := w.applyQueuedEvents(ctx); err != nil { + if errors.Is(vterrors.UnwrapAll(err), io.EOF) { + w.pool.posReached.Store(true) + } + p.workerErrors <- err + // log.Errorf("========== QQQ applyQueuedEvents worker %v is done with error=%v", w.index, err) + } + // log.Errorf("========== QQQ applyQueuedEvents worker %v is done!", w.index) + p.mu.Lock() + defer p.mu.Unlock() + + if w.index == p.head { + p.handoverHead(w.index) + // log.Errorf("========== QQQ applyQueuedEvents new head=%v with %d queued, first in binlog =%v", p.head, len(p.workers[p.head].events), p.workers[p.head].isFirstInBinlog) + } + w.lastCommitted = 0 + w.sequenceNumber = 0 + w.isFirstInBinlog = false + p.recycleWorker(w) + }() + return w, nil +} + +func (p *parallelWorkersPool) workersError() error { + select { + case err := <-p.workerErrors: + return err + default: + if p.posReached.Load() { + return io.EOF + } + return nil + } +} + +func (p *parallelWorkersPool) handoverHead(fromIndex int) { + p.head = (fromIndex + 1) % len(p.workers) + for _, w := range p.workers { + if w.index == fromIndex { + continue + } + p.wakeup.Broadcast() + } +} + +func (p *parallelWorkersPool) isApplicable(w *parallelWorker, event *binlogdatapb.VEvent) bool { + if w.index == p.head { + // head worker is always applicable + return true + } + + // Not head. + if w.isFirstInBinlog { + // First in the binary log. Only applicable when this worker is at head. + return false + } + if event.SequenceNumber == 0 { + // No info. We therefore execute sequentially. + return false + } + switch event.Type { + case binlogdatapb.VEventType_GTID, + binlogdatapb.VEventType_BEGIN, + binlogdatapb.VEventType_FIELD, + binlogdatapb.VEventType_ROW: + // logic to follow + default: + // Only parallelize row events. + return false + } + + for i := range len(p.workers) { + otherWorker := p.workers[(p.head+i)%len(p.workers)] // head based + if otherWorker.index == w.index { + // reached this worker. 
It is applicable. + return true + } + if otherWorker.sequenceNumber < 0 { + // Happens on draining. Skip this worker. + continue + } + if otherWorker.sequenceNumber == 0 { + // unknown event. + log.Errorf("========== QQQ isApplicable WHOA0 otherWorker %v sequenceNumber=%v", otherWorker.index, otherWorker.sequenceNumber) + return false + } + if otherWorker.isFirstInBinlog && i > 0 { + // log.Errorf("========== QQQ isApplicable: false, because worker %v sees otherWorker %v which is first in binlog at i=%v. head=%v", w.index, otherWorker.index, i, p.head) + // This means we've rotated a binary log. We therefore + // Wait until all previous binlog events are consumed. + return false + } + if otherWorker.sequenceNumber <= event.LastCommitted { + // worker w depends on a previous event that has not committed yet. + return false + } + } + // Never going to reach this code, because our loop will always eventually hit `otherWorker.index == w.index`. + return true +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_parallel_pool_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_parallel_pool_test.go new file mode 100644 index 00000000000..3cc33e466d0 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_parallel_pool_test.go @@ -0,0 +1,112 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + vttablet "vitess.io/vitess/go/vt/vttablet/common" +) + +func newTestVPlayer(t *testing.T, ctx context.Context) *vplayer { + + tablet := addTablet(100) + defer deleteTablet(tablet) + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "t1", + }}, + } + bls := &binlogdatapb.BinlogSource{ + Keyspace: env.KeyspaceName, + Shard: env.ShardName, + Filter: filter, + } + id := int32(1) + vsclient := newTabletConnector(tablet) + stats := binlogplayer.NewStats() + defer stats.Stop() + dbClient := playerEngine.dbClientFactoryFiltered() + err := dbClient.Connect() + require.NoError(t, err) + defer dbClient.Close() + dbName := dbClient.DBName() + // Ensure there's a dummy vreplication workflow record + _, err = dbClient.ExecuteFetch(fmt.Sprintf("insert into _vt.vreplication (id, workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name, options) values (%d, 'test_workflow', '', '', 99999, 99999, 0, 0, 'Running', '%s', '{}') on duplicate key update workflow='test', source='', pos='', max_tps=99999, max_replication_lag=99999, time_updated=0, transaction_timestamp=0, state='Running', db_name='%s'", + id, dbName, dbName), 1) + require.NoError(t, err) + defer func() { + _, err = dbClient.ExecuteFetch(fmt.Sprintf("delete from _vt.vreplication where id = %d", id), 1) + require.NoError(t, err) + }() + vr := newVReplicator(id, bls, vsclient, stats, dbClient, nil, env.Mysqld, playerEngine, vttablet.DefaultVReplicationConfig) + settings, _, err := vr.loadSettings(ctx, newVDBClient(dbClient, stats, vttablet.DefaultVReplicationConfig.RelayLogMaxItems)) + require.NoError(t, err) + + throttlerAppName := vr.throttlerAppName() + 
assert.Contains(t, throttlerAppName, "test_workflow") + assert.Contains(t, throttlerAppName, "vreplication") + assert.NotContains(t, throttlerAppName, "vcopier") + assert.NotContains(t, throttlerAppName, "vplayer") + + vp := newVPlayer(vr, settings, nil, replication.Position{}, "") + + return vp +} + +func newTestParallelWorkersPool(size int) *parallelWorkersPool { + p := ¶llelWorkersPool{ + workers: make([]*parallelWorker, size), + pool: make(chan *parallelWorker, size), + workerErrors: make(chan error, size*2), + } + p.wakeup.L = &p.mu + for i := range size { + w := ¶llelWorker{ + index: i, + pool: p, + } + p.workers[i] = w + p.pool <- w + } + p.maxBatchedCommitsPerWorker = 10 + return p +} + +func TestIsApplicable(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + p := newTestParallelWorkersPool(8) + require.NotNil(t, p) + + workers := make([]*parallelWorker, 4) + for i := range 4 { + w, err := p.availableWorker(ctx, 5, int64(i*100), i == 0) + require.NoError(t, err) + require.NotNil(t, w) + workers[i] = w + } +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_parallel_worker.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_parallel_worker.go new file mode 100644 index 00000000000..1be1ad7b64c --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_parallel_worker.go @@ -0,0 +1,557 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "context" + "errors" + "fmt" + "io" + "strconv" + "strings" + "time" + + "vitess.io/vitess/go/mysql/replication" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/binlog/binlogplayer" + "vitess.io/vitess/go/vt/log" + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + "vitess.io/vitess/go/vt/vterrors" + "vitess.io/vitess/go/vt/vttablet/tabletserver/throttle/throttlerapp" +) + +var ( + terminateWorkerEvent = &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_UNKNOWN, + LastCommitted: 0, + SequenceNumber: 0, + MustSave: true, + } + errRetryEvent = errors.New("retry event") +) + +type parallelWorker struct { + pool *parallelWorkersPool + index int + lastCommitted int64 + sequenceNumber int64 + dbClient *vdbClient + queryFunc func(ctx context.Context, sql string) (*sqltypes.Result, error) + vp *vplayer + lastPos replication.Position + + // foreignKeyChecksEnabled is the current state of the foreign key checks for the current session. + // It reflects what we have set the @@session.foreign_key_checks session variable to. + foreignKeyChecksEnabled bool + // foreignKeyChecksStateInitialized is set to true once we have initialized the foreignKeyChecksEnabled. + // The initialization is done on the first row event that this vplayer sees. 
+ foreignKeyChecksStateInitialized bool + isFirstInBinlog bool + events chan *binlogdatapb.VEvent + stats *VrLogStats +} + +// applyQueuedStmtEvent applies an actual DML statement received from the source, directly onto the backend database +func (w *parallelWorker) applyQueuedStmtEvent(ctx context.Context, event *binlogdatapb.VEvent) error { + vp := w.vp + sql := event.Statement + if sql == "" { + sql = event.Dml + } + if event.Type == binlogdatapb.VEventType_SAVEPOINT || vp.canAcceptStmtEvents { + start := time.Now() + _, err := w.queryFunc(ctx, sql) + vp.vr.stats.QueryTimings.Record(vp.phase, start) + vp.vr.stats.QueryCount.Add(vp.phase, 1) + return err + } + return fmt.Errorf("filter rules are not supported for SBR replication: %v", vp.vr.source.Filter.GetRules()) +} + +// updatePos should get called at a minimum of vreplicationMinimumHeartbeatUpdateInterval. +func (w *parallelWorker) updatePos(ctx context.Context, ts int64) (posReached bool, err error) { + return w.vp.updatePos(ctx, ts, w.queryFunc, w.dbClient) +} + +// updateFKCheck updates the @@session.foreign_key_checks variable based on the binlog row event flags. +// The function only does it if it has changed to avoid redundant updates, using the cached vplayer.foreignKeyChecksEnabled +// The foreign_key_checks value for a transaction is determined by the 2nd bit (least significant) of the flags: +// - If set (1), foreign key checks are disabled. +// - If unset (0), foreign key checks are enabled. +// updateFKCheck also updates the state for the first row event that this vplayer, and hence the db connection, sees. +func (w *parallelWorker) updateFKCheck(ctx context.Context, flags2 uint32) error { + mustUpdate := false + if w.vp.vr.WorkflowSubType == int32(binlogdatapb.VReplicationWorkflowSubType_AtomicCopy) { + // If this is an atomic copy, we must update the foreign_key_checks state even when the vplayer runs during + // the copy phase, i.e., for catchup and fastforward. 
+ mustUpdate = true + } else if w.vp.vr.state == binlogdatapb.VReplicationWorkflowState_Running { + // If the vreplication workflow is in Running state, we must update the foreign_key_checks + // state for all workflow types. + mustUpdate = true + } + if !mustUpdate { + return nil + } + dbForeignKeyChecksEnabled := true + if flags2&NoForeignKeyCheckFlagBitmask == NoForeignKeyCheckFlagBitmask { + dbForeignKeyChecksEnabled = false + } + + if w.foreignKeyChecksStateInitialized /* already set earlier */ && + dbForeignKeyChecksEnabled == w.foreignKeyChecksEnabled /* no change in the state, no need to update */ { + return nil + } + log.Infof("Setting this session's foreign_key_checks to %s", strconv.FormatBool(dbForeignKeyChecksEnabled)) + if _, err := w.queryFunc(ctx, "set @@session.foreign_key_checks="+strconv.FormatBool(dbForeignKeyChecksEnabled)); err != nil { + return fmt.Errorf("failed to set session foreign_key_checks: %w", err) + } + w.foreignKeyChecksEnabled = dbForeignKeyChecksEnabled + if !w.foreignKeyChecksStateInitialized { + log.Infof("First foreign_key_checks update to: %s", strconv.FormatBool(dbForeignKeyChecksEnabled)) + w.foreignKeyChecksStateInitialized = true + } + return nil +} + +func (w *parallelWorker) commit() error { + if w.pool.posReached.Load() { + return nil + } + if w.vp.batchMode { + return w.dbClient.CommitTrxQueryBatch() // Commit the current trx batch + } else { + return w.dbClient.Commit() + } +} + +func (w *parallelWorker) applyEvent(ctx context.Context, event *binlogdatapb.VEvent) error { + select { + case w.events <- event: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (w *parallelWorker) applyQueuedEvents(ctx context.Context) (err error) { + defer func() { + if err != nil && w.dbClient != nil { + if err := w.dbClient.Rollback(); err != nil { + log.Errorf("Error rolling back transaction: %v", err) + } + } + }() + for { + if w.pool.posReached.Load() { + return io.EOF + } + select { + case <-ctx.Done(): + return 
ctx.Err() + case vevent := <-w.events: + if err := w.applyQueuedEvent(ctx, vevent); err != nil { + if err == io.EOF { + if !w.lastPos.IsZero() { + w.vp.pos.Store(&w.lastPos) + } + return err + } + // Not EOF + w.vp.vr.stats.ErrorCounts.Add([]string{"Apply"}, 1) + var table, tableLogMsg, gtidLogMsg string + switch { + case vevent.GetFieldEvent() != nil: + table = vevent.GetFieldEvent().TableName + case vevent.GetRowEvent() != nil: + table = vevent.GetRowEvent().TableName + } + if table != "" { + tableLogMsg = fmt.Sprintf(" for table %s", table) + } + gtidLogMsg = fmt.Sprintf(" while processing position %v", w.vp.pos.Load()) + log.Errorf("Error applying event%s%s: %s", tableLogMsg, gtidLogMsg, err.Error()) + err = vterrors.Wrapf(err, "error applying event%s%s", tableLogMsg, gtidLogMsg) + err = vterrors.Wrapf(err, "worker=%d", w.index) + err = vterrors.Wrapf(err, "event sequence_number=%d, event last_committed=%d", vevent.SequenceNumber, vevent.LastCommitted) + err = vterrors.Wrapf(err, "sequence_number=%v, last_committed=%v", w.sequenceNumber, w.lastCommitted) + return err + } + // No error + if vevent == terminateWorkerEvent { + if !w.lastPos.IsZero() { + w.vp.pos.Store(&w.lastPos) + } + return w.commit() + } + } + } +} + +func (w *parallelWorker) applyQueuedCommit(ctx context.Context, vevent *binlogdatapb.VEvent) error { + if vevent.MustSave { + if err := w.dbClient.Begin(); err != nil { + return err + } + } + if !w.dbClient.InTransaction { + // We're skipping an empty transaction. We may have to save the position on inactivity. 
+ w.vp.unsavedEvent.Store(vevent) + return nil + } + posReached, err := w.updatePos(ctx, vevent.Timestamp) + if err != nil { + return err + } + + if err := w.commit(); err != nil { + return err + } + + if posReached { + return io.EOF + } + // No more events for this worker + return nil +} + +func (w *parallelWorker) applyQueuedRowEvent(ctx context.Context, vevent *binlogdatapb.VEvent) error { + if err := w.updateFKCheck(ctx, vevent.RowEvent.Flags); err != nil { + return err + } + var tplan *TablePlan + func() { + w.vp.planMu.Lock() + defer w.vp.planMu.Unlock() + tplan = w.vp.tablePlans[vevent.RowEvent.TableName] + }() + if tplan == nil { + return vterrors.Wrapf(errRetryEvent, "unexpected event on table %s", vevent.RowEvent.TableName) + } + applyFunc := func(sql string) (*sqltypes.Result, error) { + stats := NewVrLogStats("ROWCHANGE") + start := time.Now() + qr, err := w.queryFunc(ctx, sql) + w.vp.vr.stats.QueryCount.Add(w.vp.phase, 1) + w.vp.vr.stats.QueryTimings.Record(w.vp.phase, start) + stats.Send(sql) + return qr, err + } + + rowEvent := vevent.RowEvent + if w.vp.batchMode && len(rowEvent.RowChanges) > 1 { + // If we have multiple delete row events for a table with a single PK column + // then we can perform a simple bulk DELETE using an IN clause. + if (rowEvent.RowChanges[0].Before != nil && rowEvent.RowChanges[0].After == nil) && + tplan.MultiDelete != nil { + _, err := tplan.applyBulkDeleteChanges(rowEvent.RowChanges, applyFunc, w.dbClient.maxBatchSize) + return err + } + // If we're done with the copy phase then we will be replicating all INSERTS + // regardless of the PK value and can use a single INSERT statment with + // multiple VALUES clauses. 
+ if len(w.vp.copyState) == 0 && (rowEvent.RowChanges[0].Before == nil && rowEvent.RowChanges[0].After != nil) { + _, err := tplan.applyBulkInsertChanges(rowEvent.RowChanges, applyFunc, w.dbClient.maxBatchSize) + return err + } + } + + currentConcurrency := w.pool.currentConcurrency.Add(1) + defer w.pool.currentConcurrency.Add(-1) + if currentConcurrency > w.pool.maxConcurrency.Load() { + w.pool.maxConcurrency.Store(currentConcurrency) + } + for _, change := range vevent.RowEvent.RowChanges { + if _, err := tplan.applyChange(change, applyFunc); err != nil { + return err + } + } + return nil +} + +func (w *parallelWorker) applyQueuedEvent(ctx context.Context, event *binlogdatapb.VEvent) error { + isHead := false + requireWait := false + for { + func() { + w.pool.mu.Lock() + defer w.pool.mu.Unlock() + + isHead = (w.index == w.pool.head) + if isHead { + // head worker is always applicable + return + } + for requireWait || !w.pool.isApplicable(w, event) { + // log.Errorf("========== QQQ applyQueuedEvent worker %v WAITING. head=%v", w.index, w.pool.head) + w.pool.wakeup.Wait() + requireWait = false + } + }() + err := w.applyApplicableQueuedEvent(ctx, event) + if errors.Is(vterrors.UnwrapAll(err), errRetryEvent) && !isHead { + requireWait = true + log.Errorf("========== QQQ worker %v error is errRetryEvent: %v", w.index, err) + // The error here is that we tried to apply a ROW event, but the table map for this row change + // we advertised in a FIELD event to a different worker. This happens because vstreamer optimizes + // table map events: it only sends the single first event for any table (until log is rotated or until + // table is changed). As we slice the relaylog events and distribute into different worker, it is possible + // that worker #3 will attempt to run a ROW event before worker #2 has applied the FIELD event for the same table. + // So what we do here is to .Wait() again (to be woken up when a previous worker completes its event queue). 
+ continue + } + return err + } +} + +func (w *parallelWorker) applyApplicableQueuedEvent(ctx context.Context, event *binlogdatapb.VEvent) error { + switch event.Type { + case binlogdatapb.VEventType_UNKNOWN: + // An indication that there are no more events for this worker + return nil + } + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + // + t := time.NewTimer(5 * time.Second) + defer t.Stop() + go func() { + select { + case <-t.C: + log.Errorf("========== QQQ applyQueuedEvent worker %v TIMED OUT. event=%v", w.index, event.Type) + if event.Type == binlogdatapb.VEventType_ROW { + log.Errorf("========== QQQ applyQueuedEvent worker %v TIMED OUT. event=%v. table=%v", w.index, event.Type, event.RowEvent.TableName) + } + case <-ctx.Done(): + return + } + }() + + stats := NewVrLogStats(event.Type.String()) + switch event.Type { + case binlogdatapb.VEventType_GTID: + pos, err := binlogplayer.DecodePosition(event.Gtid) + if err != nil { + return err + } + func() { + w.vp.posMu.Lock() + defer w.vp.posMu.Unlock() + + w.vp.pos.Store(&pos) + // A new position should not be saved until a saveable event occurs. + w.vp.unsavedEvent.Store(nil) + }() + w.lastPos = pos + if w.vp.stopPos.IsZero() { + return nil + } + case binlogdatapb.VEventType_BEGIN: + // No-op: begin is called as needed. 
+ case binlogdatapb.VEventType_COMMIT: + if err := w.applyQueuedCommit(ctx, event); err != nil { + return err + } + w.pool.numCommits.Add(1) + case binlogdatapb.VEventType_FIELD: + if err := w.dbClient.Begin(); err != nil { + return err + } + onField := func() error { + w.vp.planMu.Lock() + defer w.vp.planMu.Unlock() + + tplan, err := w.vp.replicatorPlan.buildExecutionPlan(event.FieldEvent) + if err != nil { + return err + } + + w.vp.tablePlans[event.FieldEvent.TableName] = tplan + return nil + } + if err := onField(); err != nil { + return err + } + stats.Send(fmt.Sprintf("%v", event.FieldEvent)) + + case binlogdatapb.VEventType_INSERT, binlogdatapb.VEventType_DELETE, binlogdatapb.VEventType_UPDATE, + binlogdatapb.VEventType_REPLACE, binlogdatapb.VEventType_SAVEPOINT: + // use event.Statement if available, preparing for deprecation in 8.0 + sql := event.Statement + if sql == "" { + sql = event.Dml + } + // If the event is for one of the AWS RDS "special" or pt-table-checksum tables, we skip + if !strings.Contains(sql, " mysql.rds_") && !strings.Contains(sql, " percona.checksums") { + // This is a player using statement based replication + if err := w.dbClient.Begin(); err != nil { + return err + } + if err := w.applyQueuedStmtEvent(ctx, event); err != nil { + return err + } + stats.Send(sql) + } + case binlogdatapb.VEventType_ROW: + if err := w.dbClient.Begin(); err != nil { + return err + } + if err := w.applyQueuedRowEvent(ctx, event); err != nil { + return err + } + // Row event is logged AFTER RowChanges are applied so as to calculate the total elapsed + // time for the Row event. + stats.Send(fmt.Sprintf("%v", event.RowEvent)) + case binlogdatapb.VEventType_OTHER: + if w.dbClient.InTransaction { + // Unreachable + log.Errorf("internal error: vplayer is in a transaction on event: %v", event) + return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event) + } + // Just update the position. 
+ posReached, err := w.updatePos(ctx, event.Timestamp) + if err != nil { + return err + } + if posReached { + return io.EOF + } + case binlogdatapb.VEventType_DDL: + if w.dbClient.InTransaction { + // Unreachable + log.Errorf("internal error: vplayer is in a transaction on event: %v", event) + return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event) + } + w.vp.vr.stats.DDLEventActions.Add(w.vp.vr.source.OnDdl.String(), 1) // Record the DDL handling + switch w.vp.vr.source.OnDdl { + case binlogdatapb.OnDDLAction_IGNORE: + // We still have to update the position. + posReached, err := w.updatePos(ctx, event.Timestamp) + if err != nil { + return err + } + if posReached { + return io.EOF + } + case binlogdatapb.OnDDLAction_STOP: + if err := w.dbClient.Begin(); err != nil { + return err + } + if _, err := w.updatePos(ctx, event.Timestamp); err != nil { + return err + } + if err := w.vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, w.dbClient, fmt.Sprintf("Stopped at DDL %s", event.Statement)); err != nil { + return err + } + if err := w.dbClient.Commit(); err != nil { + return err + } + return io.EOF + case binlogdatapb.OnDDLAction_EXEC: + // It's impossible to save the position transactionally with the statement. + // So, we apply the DDL first, and then save the position. + // Manual intervention may be needed if there is a partial + // failure here. 
+ if _, err := w.queryFunc(ctx, event.Statement); err != nil { + return err + } + stats.Send(fmt.Sprintf("%v", event.Statement)) + posReached, err := w.updatePos(ctx, event.Timestamp) + if err != nil { + return err + } + if posReached { + return io.EOF + } + case binlogdatapb.OnDDLAction_EXEC_IGNORE: + if _, err := w.queryFunc(ctx, event.Statement); err != nil { + log.Infof("Ignoring error: %v for DDL: %s", err, event.Statement) + } + stats.Send(fmt.Sprintf("%v", event.Statement)) + posReached, err := w.updatePos(ctx, event.Timestamp) + if err != nil { + return err + } + if posReached { + return io.EOF + } + } + case binlogdatapb.VEventType_JOURNAL: + if w.dbClient.InTransaction { + // Unreachable + log.Errorf("internal error: vplayer is in a transaction on event: %v", event) + return fmt.Errorf("internal error: vplayer is in a transaction on event: %v", event) + } + // Ensure that we don't have a partial set of table matches in the journal. + switch event.Journal.MigrationType { + case binlogdatapb.MigrationType_SHARDS: + // All tables of the source were migrated. So, no validation needed. + case binlogdatapb.MigrationType_TABLES: + // Validate that all or none of the tables are in the journal. + jtables := make(map[string]bool) + for _, table := range event.Journal.Tables { + jtables[table] = true + } + found := false + notFound := false + for tableName := range w.vp.replicatorPlan.TablePlans { + if _, ok := jtables[tableName]; ok { + found = true + } else { + notFound = true + } + } + switch { + case found && notFound: + // Some were found and some were not found. We can't handle this. + if err := w.vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, w.dbClient, "unable to handle journal event: tables were partially matched"); err != nil { + return err + } + return io.EOF + case notFound: + // None were found. Ignore journal. + return nil + } + // All were found. We must register journal. 
+ } + log.Infof("Binlog event registering journal event %+v", event.Journal) + if err := w.vp.vr.vre.registerJournal(event.Journal, w.vp.vr.id); err != nil { + if err := w.vp.vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, w.dbClient, err.Error()); err != nil { + return err + } + return io.EOF + } + stats.Send(fmt.Sprintf("%v", event.Journal)) + return io.EOF + case binlogdatapb.VEventType_HEARTBEAT: + if event.Throttled { + if err := w.vp.vr.updateTimeThrottled(throttlerapp.VStreamerName, event.ThrottledReason); err != nil { + return err + } + } + if !w.dbClient.InTransaction { + w.vp.numAccumulatedHeartbeats++ + if err := w.vp.recordHeartbeat(); err != nil { + return err + } + } + } + + return nil +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vplayer_parallel_worker_test.go b/go/vt/vttablet/tabletmanager/vreplication/vplayer_parallel_worker_test.go new file mode 100644 index 00000000000..66a684918f5 --- /dev/null +++ b/go/vt/vttablet/tabletmanager/vreplication/vplayer_parallel_worker_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2024 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "errors" + "io" + "testing" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/vt/vterrors" +) + +func TestEOF(t *testing.T) { + { + err := io.EOF + assert.True(t, errors.Is(err, io.EOF)) + unwrapped := vterrors.UnwrapAll(err) + assert.True(t, errors.Is(unwrapped, io.EOF)) + } + { + err := vterrors.Wrapf(io.EOF, "unexpected EOF on table %s", "stress_test") + assert.False(t, errors.Is(err, io.EOF)) + unwrapped := vterrors.UnwrapAll(err) + assert.True(t, errors.Is(unwrapped, io.EOF)) + } +} + +func TestErrRetryEvent(t *testing.T) { + err := vterrors.Wrapf(errRetryEvent, "unexpected event on table %s", "stress_test") + assert.True(t, errors.Is(vterrors.UnwrapAll(err), errRetryEvent)) +} diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go index 76177b56b5b..7c5783a579e 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator.go @@ -94,9 +94,10 @@ const ( // vreplicator provides the core logic to start vreplication streams type vreplicator struct { - vre *Engine - id int32 - dbClient *vdbClient + vre *Engine + id int32 + dbClient *vdbClient + dbClientGen dbClientGenerator // source source *binlogdatapb.BinlogSource sourceVStreamer VStreamerClient @@ -141,7 +142,7 @@ type vreplicator struct { // More advanced constructs can be used. Please see the table plan builder // documentation for more info. 
func newVReplicator(id int32, source *binlogdatapb.BinlogSource, sourceVStreamer VStreamerClient, stats *binlogplayer.Stats, - dbClient binlogplayer.DBClient, mysqld mysqlctl.MysqlDaemon, vre *Engine, workflowConfig *vttablet.VReplicationConfig) *vreplicator { + dbClient binlogplayer.DBClient, dbClientGen dbClientGenerator, mysqld mysqlctl.MysqlDaemon, vre *Engine, workflowConfig *vttablet.VReplicationConfig) *vreplicator { if workflowConfig == nil { workflowConfig = vttablet.DefaultVReplicationConfig } @@ -157,6 +158,7 @@ func newVReplicator(id int32, source *binlogdatapb.BinlogSource, sourceVStreamer sourceVStreamer: sourceVStreamer, stats: stats, dbClient: newVDBClient(dbClient, stats, workflowConfig.RelayLogMaxItems), + dbClientGen: dbClientGen, mysqld: mysqld, workflowConfig: workflowConfig, } @@ -332,9 +334,9 @@ func (vr *vreplicator) replicate(ctx context.Context) error { return err } if vr.source.StopAfterCopy { - return vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, "Stopped after copy.") + return vr.setState(binlogdatapb.VReplicationWorkflowState_Stopped, nil, "Stopped after copy.") } - if err := vr.setState(binlogdatapb.VReplicationWorkflowState_Running, ""); err != nil { + if err := vr.setState(binlogdatapb.VReplicationWorkflowState_Running, nil, ""); err != nil { vr.stats.ErrorCounts.Add([]string{"Replicate"}, 1) return err } @@ -500,28 +502,31 @@ func (vr *vreplicator) insertLog(typ, message string) { insertLog(vr.dbClient, typ, vr.id, vr.state.String(), message) } -func (vr *vreplicator) setState(state binlogdatapb.VReplicationWorkflowState, message string) error { +func (vr *vreplicator) setState(state binlogdatapb.VReplicationWorkflowState, dbClient *vdbClient, message string) error { if message != "" { vr.stats.History.Add(&binlogplayer.StatsHistoryRecord{ Time: time.Now(), Message: message, }) } + if dbClient == nil { + dbClient = vr.dbClient + } vr.stats.State.Store(state.String()) query := fmt.Sprintf("update _vt.vreplication set 
state='%v', message=%v where id=%v", state, encodeString(binlogplayer.MessageTruncate(message)), vr.id) // If we're batching a transaction, then include the state update // in the current transaction batch. - if vr.dbClient.InTransaction && vr.dbClient.maxBatchSize > 0 { - vr.dbClient.AddQueryToTrxBatch(query) + if dbClient.InTransaction && dbClient.maxBatchSize > 0 { + dbClient.AddQueryToTrxBatch(query) } else { // Otherwise, send it down the wire - if _, err := vr.dbClient.ExecuteFetch(query, 1); err != nil { + if _, err := dbClient.ExecuteFetch(query, 1); err != nil { return fmt.Errorf("could not set state: %v: %v", query, err) } } if state == vr.state { return nil } - insertLog(vr.dbClient, LogStateChange, vr.id, state.String(), message) + insertLog(dbClient, LogStateChange, vr.id, state.String(), message) vr.state = state return nil diff --git a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go index b4e3ba4e366..1866cfddaf2 100644 --- a/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go +++ b/go/vt/vttablet/tabletmanager/vreplication/vreplicator_test.go @@ -233,7 +233,7 @@ func TestDeferSecondaryKeys(t *testing.T) { _, err = dbClient.ExecuteFetch(fmt.Sprintf("delete from _vt.vreplication where id = %d", id), 1) require.NoError(t, err) }() - vr := newVReplicator(id, bls, vsclient, stats, dbClient, env.Mysqld, playerEngine, vttablet.DefaultVReplicationConfig) + vr := newVReplicator(id, bls, vsclient, stats, dbClient, nil, env.Mysqld, playerEngine, vttablet.DefaultVReplicationConfig) getActionsSQLf := "select action from _vt.post_copy_action where table_name='%s'" getCurrentDDL := func(tableName string) string { req := &tabletmanagerdatapb.GetSchemaRequest{Tables: []string{tableName}} @@ -387,7 +387,7 @@ func TestDeferSecondaryKeys(t *testing.T) { if err != nil { return err } - myvr := newVReplicator(myid, bls, vsclient, stats, dbClient, env.Mysqld, playerEngine, 
vttablet.DefaultVReplicationConfig) + myvr := newVReplicator(myid, bls, vsclient, stats, dbClient, nil, env.Mysqld, playerEngine, vttablet.DefaultVReplicationConfig) myvr.WorkflowType = int32(binlogdatapb.VReplicationWorkflowType_Reshard) // Insert second post copy action record to simulate a shard merge where you // have N controllers/replicators running for the same table on the tablet. @@ -631,7 +631,7 @@ func TestCancelledDeferSecondaryKeys(t *testing.T) { _, err = dbClient.ExecuteFetch(fmt.Sprintf("delete from _vt.vreplication where id = %d", id), 1) require.NoError(t, err) }() - vr := newVReplicator(id, bls, vsclient, stats, dbClient, env.Mysqld, playerEngine, vttablet.DefaultVReplicationConfig) + vr := newVReplicator(id, bls, vsclient, stats, dbClient, nil, env.Mysqld, playerEngine, vttablet.DefaultVReplicationConfig) vr.WorkflowType = int32(binlogdatapb.VReplicationWorkflowType_MoveTables) getCurrentDDL := func(tableName string) string { req := &tabletmanagerdatapb.GetSchemaRequest{Tables: []string{tableName}} @@ -750,7 +750,7 @@ func TestResumingFromPreviousWorkflowKeepingRowsCopied(t *testing.T) { _, err = dbClient.ExecuteFetch(fmt.Sprintf("delete from _vt.vreplication where id = %d", id), 1) require.NoError(t, err) }() - vr := newVReplicator(id, bls, vsclient, stats, dbClient, env.Mysqld, playerEngine, vttablet.DefaultVReplicationConfig) + vr := newVReplicator(id, bls, vsclient, stats, dbClient, nil, env.Mysqld, playerEngine, vttablet.DefaultVReplicationConfig) assert.Equal(t, rowsCopied, vr.stats.CopyRowCount.Get()) } @@ -851,7 +851,7 @@ func TestThrottlerAppNames(t *testing.T) { _, err = dbClient.ExecuteFetch(fmt.Sprintf("delete from _vt.vreplication where id = %d", id), 1) require.NoError(t, err) }() - vr := newVReplicator(id, bls, vsclient, stats, dbClient, env.Mysqld, playerEngine, vttablet.DefaultVReplicationConfig) + vr := newVReplicator(id, bls, vsclient, stats, dbClient, nil, env.Mysqld, playerEngine, vttablet.DefaultVReplicationConfig) 
settings, _, err := vr.loadSettings(ctx, newVDBClient(dbClient, stats, vttablet.DefaultVReplicationConfig.RelayLogMaxItems)) require.NoError(t, err) diff --git a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go index fb4cb324047..67b9ef8e3b1 100644 --- a/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go +++ b/go/vt/vttablet/tabletserver/vstreamer/vstreamer.go @@ -19,6 +19,7 @@ package vstreamer import ( "bytes" "context" + "errors" "fmt" "io" "strings" @@ -80,9 +81,11 @@ type vstreamer struct { versionTableID uint64 // format and pos are updated by parseEvent. - format mysql.BinlogFormat - pos replication.Position - stopPos string + format mysql.BinlogFormat + pos replication.Position + stopPos string + lastCommitted int64 + sequenceNumber int64 phase string vse *Engine @@ -239,7 +242,7 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog switch vevent.Type { case binlogdatapb.VEventType_GTID, binlogdatapb.VEventType_BEGIN, binlogdatapb.VEventType_FIELD, - binlogdatapb.VEventType_JOURNAL: + binlogdatapb.VEventType_PREVIOUS_GTIDS, binlogdatapb.VEventType_JOURNAL: // We never have to send GTID, BEGIN, FIELD events on their own. // A JOURNAL event is always preceded by a BEGIN and followed by a COMMIT. // So, we don't have to send it right away. 
@@ -382,7 +385,7 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog } for _, vevent := range vevents { if err := bufferAndTransmit(vevent); err != nil { - if err == io.EOF { + if errors.Is(vterrors.UnwrapAll(err), io.EOF) { return nil } vs.vse.errorCounts.Add("BufferAndTransmit", 1) @@ -405,7 +408,7 @@ func (vs *vstreamer) parseEvents(ctx context.Context, events <-chan mysql.Binlog case <-hbTimer.C: checkResult, ok := vs.vse.throttlerClient.ThrottleCheckOK(ctx, vs.throttlerApp) if err := injectHeartbeat(!ok, checkResult.Summary()); err != nil { - if err == io.EOF { + if errors.Is(vterrors.UnwrapAll(err), io.EOF) { return nil } vs.vse.errorCounts.Add("Send", 1) @@ -457,17 +460,25 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent, bufferAndTransmit func(vev var vevents []*binlogdatapb.VEvent switch { + case ev.IsPreviousGTIDs(): + vevents = append(vevents, &binlogdatapb.VEvent{ + Type: binlogdatapb.VEventType_PREVIOUS_GTIDS, + }) case ev.IsGTID(): - gtid, hasBegin, err := ev.GTID(vs.format) + gtid, hasBegin, lastCommitted, sequenceNumber, err := ev.GTID(vs.format) if err != nil { return nil, fmt.Errorf("can't get GTID from binlog event: %v, event data: %#v", err, ev) } if hasBegin { vevents = append(vevents, &binlogdatapb.VEvent{ - Type: binlogdatapb.VEventType_BEGIN, + Type: binlogdatapb.VEventType_BEGIN, + LastCommitted: lastCommitted, + SequenceNumber: sequenceNumber, }) } vs.pos = replication.AppendGTID(vs.pos, gtid) + vs.lastCommitted = lastCommitted + vs.sequenceNumber = sequenceNumber case ev.IsXID(): vevents = append(vevents, &binlogdatapb.VEvent{ Type: binlogdatapb.VEventType_GTID, @@ -671,7 +682,7 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent, bufferAndTransmit func(vev for { tpevent, err := tp.GetNextEvent() if err != nil { - if err == io.EOF { + if errors.Is(vterrors.UnwrapAll(err), io.EOF) { break } return nil, err @@ -707,6 +718,8 @@ func (vs *vstreamer) parseEvent(ev mysql.BinlogEvent, bufferAndTransmit 
func(vev for _, vevent := range vevents { vevent.Timestamp = int64(ev.Timestamp()) vevent.CurrentTime = time.Now().UnixNano() + vevent.SequenceNumber = vs.sequenceNumber + vevent.LastCommitted = vs.lastCommitted } return vevents, nil } diff --git a/proto/binlogdata.proto b/proto/binlogdata.proto index 3d55de7ea14..96d6c302edc 100644 --- a/proto/binlogdata.proto +++ b/proto/binlogdata.proto @@ -319,6 +319,8 @@ enum VEventType { // If a client experiences some disruptions before receiving the event, // the client should restart the copy operation. COPY_COMPLETED = 20; + // Indicates rotation into a new binary log + PREVIOUS_GTIDS = 21; } @@ -478,6 +480,12 @@ message VEvent { bool throttled = 24; // ThrottledReason is a human readable string that explains why the stream is throttled string throttled_reason = 25; + // For GTID events, the sequence number of the most recent transaction this event depends on / conflicts with. + int64 last_committed = 26; + // For GTID events, the sequence number of this transaction. 
+ int64 sequence_number = 27; + // MustSave is a decoration by VPlayer + bool must_save = 28; } message MinimalTable { diff --git a/test/ci_workflow_gen.go b/test/ci_workflow_gen.go index bf42825d73c..7b6808e69ec 100644 --- a/test/ci_workflow_gen.go +++ b/test/ci_workflow_gen.go @@ -80,6 +80,7 @@ var ( "mysql_server_vault", "vstream", "onlineddl_vrepl", + "onlineddl_vrepl_bench", "onlineddl_vrepl_stress", "onlineddl_vrepl_stress_suite", "onlineddl_vrepl_suite", @@ -151,6 +152,7 @@ var ( } clusterRequiring16CoresMachines = []string{ "onlineddl_vrepl", + "onlineddl_vrepl_bench", "onlineddl_vrepl_stress", "onlineddl_vrepl_stress_suite", "onlineddl_vrepl_suite", diff --git a/test/config.json b/test/config.json index 17cdb019e97..3b8cc09f51f 100644 --- a/test/config.json +++ b/test/config.json @@ -295,6 +295,15 @@ "RetryMax": 1, "Tags": [] }, + "onlineddl_vrepl_bench": { + "File": "unused.go", + "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/vrepl_bench", "-timeout", "30m"], + "Command": [], + "Manual": false, + "Shard": "onlineddl_vrepl_bench", + "RetryMax": 1, + "Tags": [] + }, "onlineddl_vrepl_suite": { "File": "unused.go", "Args": ["vitess.io/vitess/go/test/endtoend/onlineddl/vrepl_suite", "-timeout", "30m"], diff --git a/web/vtadmin/src/proto/vtadmin.d.ts b/web/vtadmin/src/proto/vtadmin.d.ts index 4411c436083..c16f2d24346 100644 --- a/web/vtadmin/src/proto/vtadmin.d.ts +++ b/web/vtadmin/src/proto/vtadmin.d.ts @@ -37471,7 +37471,8 @@ export namespace binlogdata { VERSION = 17, LASTPK = 18, SAVEPOINT = 19, - COPY_COMPLETED = 20 + COPY_COMPLETED = 20, + PREVIOUS_GTIDS = 21 } /** Properties of a RowChange. */ @@ -38450,6 +38451,15 @@ export namespace binlogdata { /** VEvent throttled_reason */ throttled_reason?: (string|null); + + /** VEvent last_committed */ + last_committed?: (number|Long|null); + + /** VEvent sequence_number */ + sequence_number?: (number|Long|null); + + /** VEvent must_save */ + must_save?: (boolean|null); } /** Represents a VEvent. 
*/ @@ -38506,6 +38516,15 @@ export namespace binlogdata { /** VEvent throttled_reason. */ public throttled_reason: string; + /** VEvent last_committed. */ + public last_committed: (number|Long); + + /** VEvent sequence_number. */ + public sequence_number: (number|Long); + + /** VEvent must_save. */ + public must_save: boolean; + /** * Creates a new VEvent instance using the specified properties. * @param [properties] Properties to set diff --git a/web/vtadmin/src/proto/vtadmin.js b/web/vtadmin/src/proto/vtadmin.js index 457ae6e4214..29dc17b875c 100644 --- a/web/vtadmin/src/proto/vtadmin.js +++ b/web/vtadmin/src/proto/vtadmin.js @@ -88120,6 +88120,7 @@ export const binlogdata = $root.binlogdata = (() => { * @property {number} LASTPK=18 LASTPK value * @property {number} SAVEPOINT=19 SAVEPOINT value * @property {number} COPY_COMPLETED=20 COPY_COMPLETED value + * @property {number} PREVIOUS_GTIDS=21 PREVIOUS_GTIDS value */ binlogdata.VEventType = (function() { const valuesById = {}, values = Object.create(valuesById); @@ -88144,6 +88145,7 @@ export const binlogdata = $root.binlogdata = (() => { values[valuesById[18] = "LASTPK"] = 18; values[valuesById[19] = "SAVEPOINT"] = 19; values[valuesById[20] = "COPY_COMPLETED"] = 20; + values[valuesById[21] = "PREVIOUS_GTIDS"] = 21; return values; })(); @@ -90604,6 +90606,9 @@ export const binlogdata = $root.binlogdata = (() => { * @property {string|null} [shard] VEvent shard * @property {boolean|null} [throttled] VEvent throttled * @property {string|null} [throttled_reason] VEvent throttled_reason + * @property {number|Long|null} [last_committed] VEvent last_committed + * @property {number|Long|null} [sequence_number] VEvent sequence_number + * @property {boolean|null} [must_save] VEvent must_save */ /** @@ -90741,6 +90746,30 @@ export const binlogdata = $root.binlogdata = (() => { */ VEvent.prototype.throttled_reason = ""; + /** + * VEvent last_committed. 
+ * @member {number|Long} last_committed + * @memberof binlogdata.VEvent + * @instance + */ + VEvent.prototype.last_committed = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * VEvent sequence_number. + * @member {number|Long} sequence_number + * @memberof binlogdata.VEvent + * @instance + */ + VEvent.prototype.sequence_number = $util.Long ? $util.Long.fromBits(0,0,false) : 0; + + /** + * VEvent must_save. + * @member {boolean} must_save + * @memberof binlogdata.VEvent + * @instance + */ + VEvent.prototype.must_save = false; + /** * Creates a new VEvent instance using the specified properties. * @function create @@ -90795,6 +90824,12 @@ export const binlogdata = $root.binlogdata = (() => { writer.uint32(/* id 24, wireType 0 =*/192).bool(message.throttled); if (message.throttled_reason != null && Object.hasOwnProperty.call(message, "throttled_reason")) writer.uint32(/* id 25, wireType 2 =*/202).string(message.throttled_reason); + if (message.last_committed != null && Object.hasOwnProperty.call(message, "last_committed")) + writer.uint32(/* id 26, wireType 0 =*/208).int64(message.last_committed); + if (message.sequence_number != null && Object.hasOwnProperty.call(message, "sequence_number")) + writer.uint32(/* id 27, wireType 0 =*/216).int64(message.sequence_number); + if (message.must_save != null && Object.hasOwnProperty.call(message, "must_save")) + writer.uint32(/* id 28, wireType 0 =*/224).bool(message.must_save); return writer; }; @@ -90889,6 +90924,18 @@ export const binlogdata = $root.binlogdata = (() => { message.throttled_reason = reader.string(); break; } + case 26: { + message.last_committed = reader.int64(); + break; + } + case 27: { + message.sequence_number = reader.int64(); + break; + } + case 28: { + message.must_save = reader.bool(); + break; + } default: reader.skipType(tag & 7); break; @@ -90949,6 +90996,7 @@ export const binlogdata = $root.binlogdata = (() => { case 18: case 19: case 20: + case 21: break; } if (message.timestamp != 
null && message.hasOwnProperty("timestamp")) @@ -91003,6 +91051,15 @@ export const binlogdata = $root.binlogdata = (() => { if (message.throttled_reason != null && message.hasOwnProperty("throttled_reason")) if (!$util.isString(message.throttled_reason)) return "throttled_reason: string expected"; + if (message.last_committed != null && message.hasOwnProperty("last_committed")) + if (!$util.isInteger(message.last_committed) && !(message.last_committed && $util.isInteger(message.last_committed.low) && $util.isInteger(message.last_committed.high))) + return "last_committed: integer|Long expected"; + if (message.sequence_number != null && message.hasOwnProperty("sequence_number")) + if (!$util.isInteger(message.sequence_number) && !(message.sequence_number && $util.isInteger(message.sequence_number.low) && $util.isInteger(message.sequence_number.high))) + return "sequence_number: integer|Long expected"; + if (message.must_save != null && message.hasOwnProperty("must_save")) + if (typeof message.must_save !== "boolean") + return "must_save: boolean expected"; return null; }; @@ -91109,6 +91166,10 @@ export const binlogdata = $root.binlogdata = (() => { case 20: message.type = 20; break; + case "PREVIOUS_GTIDS": + case 21: + message.type = 21; + break; } if (object.timestamp != null) if ($util.Long) @@ -91167,6 +91228,26 @@ export const binlogdata = $root.binlogdata = (() => { message.throttled = Boolean(object.throttled); if (object.throttled_reason != null) message.throttled_reason = String(object.throttled_reason); + if (object.last_committed != null) + if ($util.Long) + (message.last_committed = $util.Long.fromValue(object.last_committed)).unsigned = false; + else if (typeof object.last_committed === "string") + message.last_committed = parseInt(object.last_committed, 10); + else if (typeof object.last_committed === "number") + message.last_committed = object.last_committed; + else if (typeof object.last_committed === "object") + message.last_committed = new 
$util.LongBits(object.last_committed.low >>> 0, object.last_committed.high >>> 0).toNumber(); + if (object.sequence_number != null) + if ($util.Long) + (message.sequence_number = $util.Long.fromValue(object.sequence_number)).unsigned = false; + else if (typeof object.sequence_number === "string") + message.sequence_number = parseInt(object.sequence_number, 10); + else if (typeof object.sequence_number === "number") + message.sequence_number = object.sequence_number; + else if (typeof object.sequence_number === "object") + message.sequence_number = new $util.LongBits(object.sequence_number.low >>> 0, object.sequence_number.high >>> 0).toNumber(); + if (object.must_save != null) + message.must_save = Boolean(object.must_save); return message; }; @@ -91207,6 +91288,17 @@ export const binlogdata = $root.binlogdata = (() => { object.shard = ""; object.throttled = false; object.throttled_reason = ""; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.last_committed = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.last_committed = options.longs === String ? "0" : 0; + if ($util.Long) { + let long = new $util.Long(0, 0, false); + object.sequence_number = options.longs === String ? long.toString() : options.longs === Number ? long.toNumber() : long; + } else + object.sequence_number = options.longs === String ? "0" : 0; + object.must_save = false; } if (message.type != null && message.hasOwnProperty("type")) object.type = options.enums === String ? $root.binlogdata.VEventType[message.type] === undefined ? 
message.type : $root.binlogdata.VEventType[message.type] : message.type; @@ -91244,6 +91336,18 @@ export const binlogdata = $root.binlogdata = (() => { object.throttled = message.throttled; if (message.throttled_reason != null && message.hasOwnProperty("throttled_reason")) object.throttled_reason = message.throttled_reason; + if (message.last_committed != null && message.hasOwnProperty("last_committed")) + if (typeof message.last_committed === "number") + object.last_committed = options.longs === String ? String(message.last_committed) : message.last_committed; + else + object.last_committed = options.longs === String ? $util.Long.prototype.toString.call(message.last_committed) : options.longs === Number ? new $util.LongBits(message.last_committed.low >>> 0, message.last_committed.high >>> 0).toNumber() : message.last_committed; + if (message.sequence_number != null && message.hasOwnProperty("sequence_number")) + if (typeof message.sequence_number === "number") + object.sequence_number = options.longs === String ? String(message.sequence_number) : message.sequence_number; + else + object.sequence_number = options.longs === String ? $util.Long.prototype.toString.call(message.sequence_number) : options.longs === Number ? new $util.LongBits(message.sequence_number.low >>> 0, message.sequence_number.high >>> 0).toNumber() : message.sequence_number; + if (message.must_save != null && message.hasOwnProperty("must_save")) + object.must_save = message.must_save; return object; };