From acfabb21316ad305122c1ca579f774c9ee692ef5 Mon Sep 17 00:00:00 2001 From: leonz789 Date: Fri, 28 Feb 2025 01:59:26 +0800 Subject: [PATCH] update components update nst_post_handler, use balance to replace balance-change remove nst balance change cap to 'unlimit'(uint64) balance recovery for 2nd phase data --- local_node.sh | 4 +- proto/imuachain/oracle/v1/rawdata_nst.proto | 5 +- proto/imuachain/oracle/v1/two_phases.proto | 33 + x/oracle/keeper/common/expected_keepers.go | 12 + x/oracle/keeper/common/two_phases.go | 3 +- x/oracle/keeper/feedermanagement/caches.go | 3 +- .../keeper/feedermanagement/feedermanager.go | 140 +- .../feedermanagement/feedermanager_test.go | 4 +- .../keeper/feedermanagement/helper_test.go | 4 +- x/oracle/keeper/feedermanagement/round.go | 13 +- x/oracle/keeper/feedermanagement/types.go | 16 +- x/oracle/keeper/msg_server_price_feed.go | 8 +- x/oracle/keeper/nst_post_aggregation.go | 174 +-- x/oracle/keeper/post_aggregation.go | 15 +- x/oracle/keeper/prices.go | 46 +- x/oracle/keeper/two_phases.go | 218 +++- x/oracle/types/key_two_phase.go | 54 +- x/oracle/types/merkletree.go | 37 +- x/oracle/types/message_create_price.go | 4 + x/oracle/types/params.go | 17 +- x/oracle/types/rawdata_nst.pb.go | 84 +- x/oracle/types/two_phases.pb.go | 1145 +++++++++++++++++ 22 files changed, 1761 insertions(+), 278 deletions(-) create mode 100644 proto/imuachain/oracle/v1/two_phases.proto create mode 100644 x/oracle/types/two_phases.pb.go diff --git a/local_node.sh b/local_node.sh index 1509cef9a..fe12efb31 100755 --- a/local_node.sh +++ b/local_node.sh @@ -244,8 +244,8 @@ imua: grpc: 127.0.0.1:9090 ws: !!str ws://127.0.0.1:26657/websocket rpc: !!str http://127.0.0.1:26657 -debugger: - grpc: !!str :50051 +#debugger: +# grpc: !!str :50051 EOF ) diff --git a/proto/imuachain/oracle/v1/rawdata_nst.proto b/proto/imuachain/oracle/v1/rawdata_nst.proto index 0fa60583e..f072224c3 100644 --- a/proto/imuachain/oracle/v1/rawdata_nst.proto +++ 
b/proto/imuachain/oracle/v1/rawdata_nst.proto @@ -9,11 +9,12 @@ message NSTKV { // staker index for a nst defined on imuachain side uint32 staker_index = 1; // balance change since last update - int64 balance_change = 2; + int64 balance = 2; } // RawDataNST represents balance changes of all stakers for a NST message RawDataNST { + uint64 version = 1; // NSTKV use array to describe {staker_indx: balance_change} for all stakers whose balance had changed - repeated NSTKV nst_balance_changes = 1; + repeated NSTKV nst_balance_changes = 2; } diff --git a/proto/imuachain/oracle/v1/two_phases.proto b/proto/imuachain/oracle/v1/two_phases.proto new file mode 100644 index 000000000..63b51a15c --- /dev/null +++ b/proto/imuachain/oracle/v1/two_phases.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; +package imuachain.oracle.v1; + +option go_package = "github.com/imua-xyz/imuachain/x/oracle/types"; + +// ValidatorIndex is a message that records the next expected index for a validator +message ValidatorIndex { + // validator is the identity of the validator this index belongs to + string validator = 1; + // next_index is the next expected index value + uint32 next_index = 2; +} + +// FeederValidatorsIndex is a message that contains the indexes of all validators for a feeder +message FeederValidatorsIndex{ + // validator_index_list is the list of indexes for the validators + repeated ValidatorIndex validator_index_list = 2; +} + +message HashNode { + uint32 index = 1; + bytes hash = 2; +} + +// Proof represents all hash nodes of a Merkle tree with indexes +message FlattenTree{ + repeated HashNode nodes = 1; +} + +message TreeInfo { + uint32 leaf_count = 1; + bytes root_hash = 2; +} diff --git a/x/oracle/keeper/common/expected_keepers.go b/x/oracle/keeper/common/expected_keepers.go index 0e83e37ed..15c45c6d4 100644 --- a/x/oracle/keeper/common/expected_keepers.go +++ b/x/oracle/keeper/common/expected_keepers.go @@ -6,6 +6,7 @@ import ( sdkmath "cosmossdk.io/math" abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/libs/log" + 
"github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" dogfoodkeeper "github.com/imua-xyz/imuachain/x/dogfood/keeper" @@ -71,6 +72,17 @@ type KeeperOracle interface { SetNonce(ctx sdk.Context, nonce types.ValidatorNonce) GetSpecifiedAssetsPrice(ctx sdk.Context, assetID string) (types.Price, error) GetMultipleAssetsPrices(ctx sdk.Context, assetIDs map[string]interface{}) (map[string]types.Price, error) + + Setup2ndPhase(ctx sdk.Context, feederID uint64, validators []string, leafCount uint32, rootHash []byte) + Clear2ndPhase(ctx sdk.Context, feederID uint64, rootIndex uint32) + AddNodesToMerkleTree(ctx sdk.Context, feederID uint64, proof []*types.HashNode) + SetNextPieceIndexForFeeder(ctx sdk.Context, feederID uint64, pieceIndex uint32) + GetPostAggregation(feederID int64) (handler PostAggregationHandler, found bool) + SetRawDataPiece(ctx sdk.Context, feederID uint64, pieceIndex uint32, rawData []byte) + GetRawDataPieces(ctx sdk.Context, feederID uint64) ([][]byte, error) + GetFeederTreeInfo(ctx sdk.Context, feederID uint64) (uint32, []byte) + GetNodesFromMerkleTree(ctx sdk.Context, feederID uint64) []*types.HashNode + MustUnmarshal(bz []byte, ptr codec.ProtoMarshaler) } var _ KeeperDogfood = dogfoodkeeper.Keeper{} diff --git a/x/oracle/keeper/common/two_phases.go b/x/oracle/keeper/common/two_phases.go index 60a2f9ff8..167aa2b9a 100644 --- a/x/oracle/keeper/common/two_phases.go +++ b/x/oracle/keeper/common/two_phases.go @@ -5,4 +5,5 @@ import ( ) // the input data could be either rawData bytes of data with big size for non-price senarios or 'price' info -type PostAggregationHandler func(data []byte, ctx sdk.Context, k KeeperOracle) error +// type PostAggregationHandler func(data []byte, ctx sdk.Context, k KeeperOracle) error +type PostAggregationHandler func(ctx sdk.Context, data []byte, feederID, roundID uint64, k KeeperOracle) error diff --git 
a/x/oracle/keeper/feedermanagement/caches.go b/x/oracle/keeper/feedermanagement/caches.go index f4d1e73c2..5990fa324 100644 --- a/x/oracle/keeper/feedermanagement/caches.go +++ b/x/oracle/keeper/feedermanagement/caches.go @@ -9,7 +9,6 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/ethereum/go-ethereum/common" - "github.com/imua-xyz/imuachain/x/oracle/types" oracletypes "github.com/imua-xyz/imuachain/x/oracle/types" ) @@ -127,7 +126,7 @@ func (c *caches) IsRule2PhasesByFeederID(feederID uint64) bool { return c.isRule2PhasesByRule(rule) } -func (c *caches) isRule2PhasesByRule(rule *types.RuleSource) bool { +func (c *caches) isRule2PhasesByRule(rule *oracletypes.RuleSource) bool { // just check the format and don't care the verification here, the verification should be done by 'params' not in this memory calculator(feedermanager) if len(rule.SourceIDs) == 1 && rule.SourceIDs[0] == 0 && rule.Nom != nil && len(rule.Nom.SourceIDs) == 1 { diff --git a/x/oracle/keeper/feedermanagement/feedermanager.go b/x/oracle/keeper/feedermanagement/feedermanager.go index 672ca142f..cdd728af9 100644 --- a/x/oracle/keeper/feedermanagement/feedermanager.go +++ b/x/oracle/keeper/feedermanagement/feedermanager.go @@ -65,8 +65,7 @@ func (f *FeederManager) BeginBlock(ctx sdk.Context) (recovered bool) { // if the cache is nil and we are not in recovery mode, init the caches if f.cs == nil { var err error - recovered, err = f.recovery(ctx) - // it's safe to panic since this will only happen when the node is starting with something wrong in the store + recovered, err = f.recovery(ctx) // it's safe to panic since this will only happen when the node is starting with something wrong in the store if err != nil { panic(err) } @@ -284,19 +283,57 @@ func (f *FeederManager) commitRounds(ctx sdk.Context) { // #nosec G115 // tokenID is index of slice if updated := f.k.AppendPriceTR(ctx, uint64(r.tokenID), *priceCommit, finalPrice.DetID); !updated { - // failed to append price due to 
roundID gap, and this is a 'should-not-happen' case - f.k.GrowRoundID(ctx, uint64(r.tokenID), uint64(r.roundID)) + // this is an 'impossible' case, we should not reach here + latestPrice, latestRoundID := f.k.GrowRoundID(ctx, uint64(r.tokenID), uint64(r.roundID)) + logger.Error("failed to append price due to roundID gap and update this round with GrowRoundID", "feederID", r.feederID, "try-to-update-roundID", r.roundID, "try-to-update-price", priceCommit, "result-latestPrice", latestPrice, "result-latestRoundID", latestRoundID) + } else { + fstr := strconv.FormatInt(feederID, 10) + successFeederIDs = append(successFeederIDs, fstr) + + // set up for 2-phases aggregation + if r.twoPhases { + // no more validation check, they've all been done by previous process + lc, _ := strconv.ParseUint(finalPrice.DetID, 10, 32) + // set up mem-round for 2nd phase aggregation + r.m = oracletypes.NewMT(f.cs.RawDataPieceSize(), uint32(lc), []byte(finalPrice.Price)) + // set up state for 2nd phase aggregation + // #nosec G115 + f.k.Setup2ndPhase(ctx, uint64(r.feederID), f.cs.GetValidators(), uint32(lc), []byte(finalPrice.Price)) + } } - - fstr := strconv.FormatInt(feederID, 10) - successFeederIDs = append(successFeederIDs, fstr) // there's no valid price for any round yet } else { logger.Error("We currently only support rules under oracle V1: only allow price from source Chainlink", "feederID", r.feederID) } } // keep aggregator for possible 'handlQuotingMisBehavior' at quotingWindowEnd r.status = roundStatusClosed + } else if r.twoPhases { + // check if r is 2-phases and rawData is completed, for 2nd-phase, the status of round must be closed + if r.m.CollectingRawData() { + if len(r.cachedProofForBlock) > 0 { + // #nosec G115 + f.k.AddNodesToMerkleTree(ctx, uint64(r.feederID), r.cachedProofForBlock) + // reset cachedProofForBlock after commit to state + r.cachedProofForBlock = nil + } + if LatestLeafIndex, ok := r.m.LatestLeafIndex(); ok { + // #nosec G115 + 
f.k.SetNextPieceIndexForFeeder(ctx, uint64(r.feederID), LatestLeafIndex+1) + } + } else if rawData, ok := r.m.CompleteRawData(); ok { + // execute postHandler with rawData + if err := r.h(ctx, rawData, uint64(r.feederID), uint64(r.roundID), f.k); err != nil { + // just log the error and wait for next round to update + // TODO(leonz): this suites for NST, we can just wait for next round to update, but does it suites for commmon case ? should we do some other postHandling for this fail when it's not of NST case? + logger.Error("failed to execute postHandler for 2phases aggregation on consensus price", "feederID", r.feederID, "roundID", r.roundID, "consensus 1st-phase hash:%s", hex.EncodeToString(r.m.RootHash())) + } + // reset related cache from state + // #nosec G115 + f.k.Clear2ndPhase(ctx, uint64(r.feederID), r.m.RootIndex()) + r.m = nil + } } + // close all quotingWindow to skip current rounds' 'handlQuotingMisBehavior' if f.forceSeal { r.closeQuotingWindow() @@ -532,7 +569,8 @@ func (f *FeederManager) updateRoundsParamsAndAddNewRounds(ctx sdk.Context) { logger.Info("[mem] add new round", "feederID", feederID, "height", height) f.sortedFeederIDs = append(f.sortedFeederIDs, feederID) twoPhases := f.cs.IsRule2PhasesByFeederID(uint64(feederID)) - f.rounds[feederID] = newRound(feederID, tokenFeeder, int64(params.MaxNonce), f.cs, NewAggMedian(), twoPhases) + ph, _ := f.k.GetPostAggregation(feederID) + f.rounds[feederID] = newRound(feederID, tokenFeeder, int64(params.MaxNonce), f.cs, NewAggMedian(), twoPhases, ph) } } f.sortedFeederIDs.sort() @@ -663,6 +701,9 @@ func (f *FeederManager) ValidateMsg(msg *oracletypes.MsgCreatePrice) error { } } + if f.cs.IsRule2PhasesByFeederID(msg.FeederID) && msg.IsNotTwoPhases() { + return fmt.Errorf("feederID:%d is configured for 2-phases aggregation, but the message is not of 2-phases", msg.FeederID) + } // extra check for message as 1st phase for 2-phases aggregation if msg.IsPhaseOne() { if len(msg.Prices) != 1 { @@ -687,10 +728,11 
@@ func (f *FeederManager) ValidateMsg(msg *oracletypes.MsgCreatePrice) error { } // we wait one more maxNonce blocks to make sure proposer getting expected txs in their mempool + // we don't use the last block of current round(which is the baseBlock of the next round), so the quotingWindow for 2nd-phase message is from [baseBlock+2*maxNonce, nextBaseBlock-1] // #nosec G115 // maxNonce is positive windowForPhaseTwo := f.cs.IntervalForFeederID(msg.FeederID) - uint64(f.cs.GetMaxNonce())*2 - if leafCount > windowForPhaseTwo { - return fmt.Errorf("2-phases aggregation for feederID:%d, should have detID less than or equal to %d", msg.FeederID, windowForPhaseTwo) + if leafCount < 1 || leafCount > windowForPhaseTwo { + return fmt.Errorf("2-phases aggregation for feederID:%d, should have detID less than or equal to %d and be at least 1, got %d", msg.FeederID, windowForPhaseTwo, leafCount) } return nil @@ -713,6 +755,26 @@ func (f *FeederManager) ProcessQuote(ctx sdk.Context, msg *oracletypes.MsgCreate return nil, fmt.Errorf("round not exists for feederID:%d, proposer:%s", msgItem.FeederID, msgItem.Validator) } + // TODO(leonz): remove this ? 
+ if !r.twoPhases != msg.IsNotTwoPhases() { + // this should not happen, since message itself had been checked in 'validateMsg', when came to here it means there's something wrong in 'round' initialization + return nil, fmt.Errorf("the 2phases status of round and message is mismatched, there's something wrong with mem-round initialization, feederID:%d", msg.FeederID) + } + + if msg.IsPhaseTwo() { + // either there's no consensus price from 1st phase or the 2nd phase had collected all pieces, this condition will be true and we will reject the transaction + // also we don't record any 'miss' count under this same condition + if r.m == nil || r.m.Completed() { + return nil, fmt.Errorf("message with 2-nd phase for feederID:%d of round_%d is rejected since that round is not collecting raw data", msg.FeederID, r.roundID) + } + + // #nosec G115 + if uint64(ctx.BlockHeight()) < r.roundPhaseTwoStartBlock { + return nil, fmt.Errorf("message with 2-nd phase for feederID:%d of round_%d can only be accepted at block height of at least %d", msg.FeederID, r.roundID, r.roundPhaseTwoStartBlock) + } + + } + + // #nosec G115 // baseBlock is block height which is not negative if valid := r.ValidQuotingBaseBlock(int64(msg.BasedBlock)); !valid { return nil, fmt.Errorf("failed to process price-feed msg for feederID:%d, round is quoting:%t,quotingWindow is open:%t, expected baseBlock:%d, got baseBlock:%d", msgItem.FeederID, r.IsQuoting(), r.IsQuotingWindowOpen(), r.roundBaseBlock, msg.BasedBlock) @@ -839,8 +901,10 @@ func (f *FeederManager) recovery(ctx sdk.Context) (bool, error) { continue } tfID := int64(tfID) + // #nosec G115 // safe conversion twoPhases := f.cs.IsRule2PhasesByFeederID(uint64(tfID)) - f.rounds[tfID] = newRound(tfID, tf, int64(params.MaxNonce), f.cs, NewAggMedian(), twoPhases) + postHandler, _ := f.k.GetPostAggregation(tfID) + f.rounds[tfID] = newRound(tfID, tf, int64(params.MaxNonce), f.cs, NewAggMedian(), twoPhases, postHandler) f.sortedFeederIDs.add(tfID) } 
f.prepareRounds(ctxReplay) @@ -878,10 +942,36 @@ func (f *FeederManager) recovery(ctx sdk.Context) (bool, error) { f.cs.SkipCommit() + pieceSize := f.cs.RawDataPieceSize() + // recovery for 2nd-phase state + for _, r := range f.rounds { + if r.twoPhases { + // reset r.m from state + // #nosec G115 + feederID := uint64(r.feederID) + leafCount, rootHash := f.k.GetFeederTreeInfo(ctx, uint64(r.feederID)) + if leafCount == 0 { + continue + } + r.m = oracletypes.NewMT(pieceSize, leafCount, rootHash) + // rawdata + rawDataPieces, err := f.k.GetRawDataPieces(ctx, feederID) + if err != nil { + return false, err + } + r.m.SetRawDataPieces(rawDataPieces) + // proof nodes + // #nosec G115 + nodes := f.k.GetNodesFromMerkleTree(ctx, uint64(r.feederID)) + r.m.SetProofNodes(nodes) + } + } + return true, nil } func (f *FeederManager) RoundIDToBaseBlock(feederID, roundID uint64) (uint64, bool) { + // #nosec G115 r, ok := f.rounds[int64(feederID)] if !ok { return 0, false } @@ -891,7 +981,7 @@ func (f *FeederManager) RoundIDToBaseBlock(feederID, roundID uint64) (uint64, bo // BaseBlockToRoundID returns the roundID which the input baseblock indicates to, it is different to the roundID of which this baseBlock BelongsTo (+1) func (f *FeederManager) BaseBlockToNextRoundID(feederID, baseBlock uint64) (uint64, bool) { - //TODO(leonz): use uint64 as f.rounds key + // TODO(leonz): use uint64 as f.rounds key // #nosec G115 r, ok := f.rounds[int64(feederID)] if !ok { @@ -996,34 +1086,42 @@ func getProtoMsgItemFromQuote(msg *oracletypes.MsgCreatePrice) *oracletypes.MsgI } } -func (f *FeederManager) ProcessRawData(msg *oracletypes.MsgCreatePrice) error { +// ProcessRawData verifies the submitted piece of rawData with proof against the expected root and caches the result if it passed the verification +// return (cached rawData piece, error) +func (f *FeederManager) ProcessRawData(msg *oracletypes.MsgCreatePrice) ([]byte, error) { + if err := f.ValidateMsg(msg); err != nil { + return nil, 
oracletypes.ErrInvalidMsg.Wrap(err.Error()) + } piece, ok := f.GetPieceWithProof(msg) if !ok { - return errors.New("failed to parse rawdata piece from message") + return nil, errors.New("failed to parse rawdata piece from message") } // #nosec G115 r, ok := f.rounds[int64(msg.FeederID)] if !ok { // this should not happen - return fmt.Errorf("round for feederID:%d not exists", msg.FeederID) + return nil, fmt.Errorf("round for feederID:%d not exists", msg.FeederID) } - if r.m != nil { - return fmt.Errorf("feederID %d is not collecting rawData", msg.FeederID) + if r.m == nil { + return nil, fmt.Errorf("feederID %d is not collecting rawData", msg.FeederID) } // we don't check the 1st return value to see if this input proof is of the minimal, that's the duty of anteHandler, and 'verified' pieceWithProof will not fail the tx execution - _, ok = r.m.VerifyAndCache(piece.Index, piece.RawData, piece.Proof) + cachedProof, ok := r.m.VerifyAndCache(piece.Index, piece.RawData, piece.Proof) if !ok { - return fmt.Errorf("failed to verify piece of index %d provided within message for feederID:%d against root:%s", piece.Index, msg.FeederID, hex.EncodeToString(r.m.RootHash())) + return nil, fmt.Errorf("failed to verify piece of index %d provided within message for feederID:%d against root:%s", piece.Index, msg.FeederID, hex.EncodeToString(r.m.RootHash())) + } + // we don't need to cache the proof for state updating if the merkle tree have collected all rawData + if !r.m.Completed() { + r.cachedProofForBlock = append(r.cachedProofForBlock, cachedProof...) 
} // we don't do no state update in tx exexuting, the postHandler and all state update will be handled in EndBlock // // post handle rawData registered for the feederID // // clear all caching pieces from stateDB - // // // remove/reset merkleTree // // remove merkleTree // persist piece for recovery (with memory-cache update into merkleTree) // save this piece and proof to db for recovery, for nodes without running, // this process only causes additional: two write to stateDB(piece, proof), one read from the stateDB(piece) - return nil + return piece.RawData, nil } diff --git a/x/oracle/keeper/feedermanagement/feedermanager_test.go b/x/oracle/keeper/feedermanagement/feedermanager_test.go index 954c7f22b..c1c80cdcc 100644 --- a/x/oracle/keeper/feedermanagement/feedermanager_test.go +++ b/x/oracle/keeper/feedermanagement/feedermanager_test.go @@ -26,14 +26,14 @@ func TestFeederManagement(t *testing.T) { ps2 := ps1 fm2 := *fm - fm.rounds[1] = newRound(1, testdata.DefaultParamsForTest().TokenFeeders[1], 3, c, defaultAggMedian) + fm.rounds[1] = newRound(1, testdata.DefaultParamsForTest().TokenFeeders[1], 3, c, defaultAggMedian, false, nil) fm.rounds[1].PrepareForNextBlock(20) fm.sortedFeederIDs.add(1) fm.rounds[1].a.ds.AddPriceSource(&ps1, big.NewInt(1), "v1") fm2.rounds = make(map[int64]*round) fm2.sortedFeederIDs = make([]int64, 0) - fm2.rounds[1] = newRound(1, testdata.DefaultParamsForTest().TokenFeeders[1], 3, c, defaultAggMedian) + fm2.rounds[1] = newRound(1, testdata.DefaultParamsForTest().TokenFeeders[1], 3, c, defaultAggMedian, false, nil) fm2.rounds[1].PrepareForNextBlock(20) fm2.sortedFeederIDs.add(1) fm2.rounds[1].a.ds.AddPriceSource(&ps2, big.NewInt(1), "v1") diff --git a/x/oracle/keeper/feedermanagement/helper_test.go b/x/oracle/keeper/feedermanagement/helper_test.go index 4f70b4611..d30112632 100644 --- a/x/oracle/keeper/feedermanagement/helper_test.go +++ b/x/oracle/keeper/feedermanagement/helper_test.go @@ -98,12 +98,12 @@ func (t *Test) 
NewAggregator(filled bool) *aggregator { func (t *Test) NewRound(cs CacheReader) *round { feederID := r.Intn(len(params.TokenFeeders)-1) + 1 - round := newRound(int64(feederID), params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian, false) + round := newRound(int64(feederID), params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian, false, nil) return round } func (t *Test) NewRoundWithFeederID(cs CacheReader, feederID int64) *round { - round := newRound(feederID, params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian, false) + round := newRound(feederID, params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian, false, nil) return round } diff --git a/x/oracle/keeper/feedermanagement/round.go b/x/oracle/keeper/feedermanagement/round.go index d2b5384ea..311b149e4 100644 --- a/x/oracle/keeper/feedermanagement/round.go +++ b/x/oracle/keeper/feedermanagement/round.go @@ -3,10 +3,11 @@ package feedermanagement import ( "fmt" + "github.com/imua-xyz/imuachain/x/oracle/keeper/common" oracletypes "github.com/imua-xyz/imuachain/x/oracle/types" ) -func newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowSize int64, cache CacheReader, algo AggAlgorithm, twoPhases bool) *round { +func newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowSize int64, cache CacheReader, algo AggAlgorithm, twoPhases bool, postHandler common.PostAggregationHandler) *round { ret := &round{ // #nosec G115 startBaseBlock: int64(tokenFeeder.StartBaseBlock), @@ -21,7 +22,6 @@ func newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowS // #nosec G115 tokenID: int64(tokenFeeder.TokenID), cache: cache, - // default value status: roundStatusClosed, a: nil, @@ -31,7 +31,9 @@ func newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowS twoPhases: twoPhases, } if twoPhases { - ret.rawData = make([][]byte, 0) + if postHandler != nil { + ret.h = 
postHandler + } } return ret } @@ -173,6 +175,11 @@ func (r *round) PrepareForNextBlock(currentHeight int64) (open bool) { r.openQuotingWindow() open = true } + if r.twoPhases { + // wait quoteWindowSize-1 blocks for proposer to collecting pieces + // #nosec G115 + r.roundPhaseTwoStartBlock = uint64(r.roundBaseBlock + 2*r.quoteWindowSize) + } } return open } diff --git a/x/oracle/keeper/feedermanagement/types.go b/x/oracle/keeper/feedermanagement/types.go index 0c9b992b8..9ddc374f1 100644 --- a/x/oracle/keeper/feedermanagement/types.go +++ b/x/oracle/keeper/feedermanagement/types.go @@ -6,6 +6,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/imua-xyz/imuachain/x/oracle/keeper/common" + "github.com/imua-xyz/imuachain/x/oracle/types" oracletypes "github.com/imua-xyz/imuachain/x/oracle/types" ) @@ -211,6 +212,12 @@ type round struct { // roundBaseBlock is the round base block of current round roundBaseBlock int64 + + // roundPhaseTwoStartBlock defines the first block when the oracle begins accepting second-phase messages containing raw data pieces during two-phase aggregation + // We delay collecting raw data pieces for several blocks after first-phase consensus to give proposers time to receive and prepare messages with raw data pieces and proofs + // Since proposers are penalized for not including necessary raw data pieces, we provide this buffer to prevent unfair punishment due to overly strict timeouts + roundPhaseTwoStartBlock uint64 + // roundID is the round ID of current round roundID int64 // status indicates the status of current round @@ -224,13 +231,12 @@ type round struct { // twoPhases indicates if the corresponding tokenfeeder requires 2-phase aggregation twoPhases bool - // rawData is original data for tokenFeeder with 2-phases aggregation rule - // a validator can provide more than one rawData for one round - rawData [][]byte - // in 2-phases aggregation, the aggregated price is the hash root of pieces of rawData, when we received 
every piece to recover the whole original rawData, this flag is set to true - rawDataSealed bool m *oracletypes.MerkleTree + // cachedProofForBlock keeps added proof cache from current block, used for EndBlock to update state + // we don't do any state update during oracle tx executing, so we cached the information before endBlock if any + // this will be reset on endBlock after update state + cachedProofForBlock types.Proof h common.PostAggregationHandler } diff --git a/x/oracle/keeper/msg_server_price_feed.go b/x/oracle/keeper/msg_server_price_feed.go index 69c94a20d..db48f05df 100644 --- a/x/oracle/keeper/msg_server_price_feed.go +++ b/x/oracle/keeper/msg_server_price_feed.go @@ -4,6 +4,7 @@ import ( "context" "crypto/sha256" "encoding/base64" + "encoding/hex" "strconv" "strings" "time" @@ -32,7 +33,7 @@ func (ms msgServer) CreatePrice(goCtx context.Context, msg *types.MsgCreatePrice logger := ms.Logger(ctx) validator, _ := types.ConsAddrStrFromCreator(msg.Creator) - logQuote := []interface{}{"feederID", msg.FeederID, "baseBlock", msg.BasedBlock, "proposer", validator, "msg-nonce", msg.Nonce, "height", ctx.BlockHeight()} + logQuote := []any{"feederID", msg.FeederID, "baseBlock", msg.BasedBlock, "proposer", validator, "msg-nonce", msg.Nonce, "height", ctx.BlockHeight()} if err := checkTimestamp(ctx, msg); err != nil { logger.Error("quote has invalid timestamp", append(logQuote, "error", err)...) 
@@ -41,8 +42,9 @@ func (ms msgServer) CreatePrice(goCtx context.Context, msg *types.MsgCreatePrice // goto rawData process which needs no 'aggragation', we just verify the provided piece with recoreded root which got consensus if msg.IsPhaseTwo() { - err := ms.ProcessRawData(msg) - if err != nil { + cachedRawData, err := ms.ProcessRawData(msg) + if err == nil { + logger.Info("quote of 2nd-phase added rawData", append(logQuote, "rawData", hex.EncodeToString(cachedRawData))...) return &types.MsgCreatePriceResponse{}, nil } logger.Error("quote of 2nd-phase for rawData failed", append(logQuote, "error", err)) diff --git a/x/oracle/keeper/nst_post_aggregation.go b/x/oracle/keeper/nst_post_aggregation.go index 9660297b4..553c105cc 100644 --- a/x/oracle/keeper/nst_post_aggregation.go +++ b/x/oracle/keeper/nst_post_aggregation.go @@ -25,40 +25,6 @@ import ( // undelegate: update operator's price, operator's totalAmount, operator's totalShare, staker's share // msg(refund or slash on beaconChain): update staker's price, operator's price -type NSTAssetID string - -const ( - // NSTETHAssetAddr = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" - // TODO: we currently support NSTETH only which has capped effective balance for one validator - // TODO: this is a bad practice, and for Lz, they have different version of endpoint with different chainID - // Do the validation before invoke oracle related functions instead of check these hard code ids here. 
- ETHMainnetChainID = "0x7595" - ETHLocalnetChainID = "0x65" - ETHHoleskyChainID = "0x9d19" - ETHSepoliaChainID = "0x9ce1" - - NSTETHAssetIDMainnet NSTAssetID = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee_0x7595" - NSTETHAssetIDLocalnet NSTAssetID = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee_0x65" - NSTETHAssetIDHolesky NSTAssetID = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee_0x9d19" - NSTETHAssetIDSepolia NSTAssetID = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee_0x9ce1" -) - -var ( - limitedChangeNST = map[NSTAssetID]bool{ - NSTETHAssetIDMainnet: true, - NSTETHAssetIDLocalnet: true, - NSTETHAssetIDHolesky: true, - NSTETHAssetIDSepolia: true, - } - - maxEffectiveBalances = map[NSTAssetID]int{ - NSTETHAssetIDMainnet: 32, - NSTETHAssetIDLocalnet: 32, - NSTETHAssetIDHolesky: 32, - NSTETHAssetIDSepolia: 32, - } -) - // SetStakerInfos set stakerInfos for the specific assetID func (k Keeper) SetStakerInfos(ctx sdk.Context, assetID string, stakerInfos []*types.StakerInfo) { store := ctx.KVStore(k.storeKey) @@ -176,9 +142,6 @@ func (k Keeper) GetAllStakerListAssets(ctx sdk.Context) (ret []types.StakerListA } func (k Keeper) UpdateNSTValidatorListForStaker(ctx sdk.Context, assetID, stakerAddr, validatorPubkey string, amount sdkmath.Int) error { - if !IsLimitedChangeNST(assetID) { - return types.ErrNSTAssetNotSupported - } _, decimalInt, err := k.getDecimal(ctx, assetID) if err != nil { return err @@ -287,78 +250,6 @@ func (k Keeper) UpdateNSTValidatorListForStaker(ctx sdk.Context, assetID, staker return nil } -// UpdateNSTByBalanceChange updates balance info for staker under native-restaking asset of assetID when its balance changed by slash/refund on the source chain (beacon chain for eth) -func (k Keeper) UpdateNSTByBalanceChange(ctx sdk.Context, assetID string, price types.PriceTimeRound, version int64) error { - if !IsLimitedChangeNST(assetID) { - return types.ErrNSTAssetNotSupported - } - if version != k.GetNSTVersion(ctx, assetID) { - return errors.New("version not 
match") - } - _, chainID, _ := assetstypes.ParseID(assetID) - rawData := []byte(price.Price) - if len(rawData) < 32 { - return errors.New("length of indicate maps for stakers should be exactly 32 bytes") - } - sl := k.GetStakerList(ctx, assetID) - if len(sl.StakerAddrs) == 0 { - return errors.New("staker list is empty") - } - stakerChanges, err := parseBalanceChangeCapped(rawData, sl) - if err != nil { - return fmt.Errorf("failed to parse balance changes: %w", err) - } - store := ctx.KVStore(k.storeKey) - for _, stakerAddr := range sl.StakerAddrs { - // if stakerAddr is not in stakerChanges, then the change would be set to 0 which is expected - change := stakerChanges[stakerAddr] - key := types.NativeTokenStakerKey(assetID, stakerAddr) - value := store.Get(key) - if value == nil { - return errors.New("stakerInfo does not exist") - } - stakerInfo := &types.StakerInfo{} - k.cdc.MustUnmarshal(value, stakerInfo) - newBalance := types.BalanceInfo{} - if length := len(stakerInfo.BalanceList); length > 0 { - newBalance = *(stakerInfo.BalanceList[length-1]) - } - newBalance.Block = uint64(ctx.BlockHeight()) - // we set index as a global reference used through all rounds - newBalance.Index++ - newBalance.Change = types.Action_ACTION_SLASH_REFUND - newBalance.RoundID = price.RoundID - // balance update are based on initial/max effective balance: 32 - maxBalance := maxEffectiveBalance(assetID) * (len(stakerInfo.ValidatorPubkeyList)) - balance := maxBalance + change - // there's one case that this delta might be more than previous Balance - // staker's validatorlist: {v1, v2, v3, v5} - // in one same block: withdraw v2, v3, v5, balance of v2, v3, v5 all be slashed by -16 - // => amount: 32*4->32(by withdraw), the validatorList of feeder will be updated on next block, so it will report the balance change of v5: -16 as in the staker's balance change, result to: 32*4->32-> 32-16*3 = -16 - // we will just ignore this misbehavior introduced by synchronize-issue, and this will be 
correct in next block/round - if balance > maxBalance || balance < 0 { - // balance should not be able to be reduced to 0 by balance change - return errors.New("effective balance should never exceeds 32 for one validator and should be positive") - } - - if delta := int64(balance) - newBalance.Balance; delta != 0 { - decimal, _, err := k.getDecimal(ctx, assetID) - if err != nil { - return err - } - if err := k.delegationKeeper.UpdateNSTBalance(ctx, getStakerID(stakerAddr, chainID), assetID, sdkmath.NewIntWithDecimal(delta, decimal)); err != nil { - return err - } - newBalance.Balance = int64(balance) - } - // newBalance.Balance += int64(change) - stakerInfo.Append(&newBalance) - bz := k.cdc.MustMarshal(stakerInfo) - store.Set(key, bz) - } - return nil -} - // IncreaseNSTVersion increases the version of native token for assetID func (k Keeper) IncreaseNSTVersion(ctx sdk.Context, assetID string) int64 { store := ctx.KVStore(k.storeKey) @@ -410,15 +301,6 @@ func getStakerID(stakerAddr string, chainID uint64) string { return strings.Join([]string{strings.ToLower(stakerAddr), hexutil.EncodeUint64(chainID)}, utils.DelimiterForID) } -// IsLimitChangesNST returns that is input assetID corresponding to asset which balance change has a cap limit -func IsLimitedChangeNST(assetID string) bool { - return limitedChangeNST[NSTAssetID(assetID)] -} - -func maxEffectiveBalance(assetID string) int { - return maxEffectiveBalances[NSTAssetID(assetID)] -} - func getNSTVersionFromDetID(detID string) (int64, error) { parsedDetID := strings.Split(detID, "_") if len(parsedDetID) != 2 { @@ -432,6 +314,60 @@ func getNSTVersionFromDetID(detID string) (int64, error) { } // UpdateNSTBalanceChange serves the post handling for nst balance change -func UpdateNSTBalanceChange(rawData []byte, ctx sdk.Context, k common.KeeperOracle) error { +func UpdateNSTBalanceChange(ctx sdk.Context, rawData []byte, feederID, roundID uint64, kInf common.KeeperOracle) error { + balanceChanges := &types.RawDataNST{} + 
kInf.MustUnmarshal(rawData, balanceChanges) + + k, ok := kInf.(Keeper) + if !ok { + return errors.New("input keeper interface type error") + } + assetID := k.GetParamsFromCache().GetAssetIDForNSTFromFeederID(feederID) + // TODO(leonz): use uint64 for version state + if balanceChanges.Version != uint64(k.GetNSTVersion(ctx, assetID)) { + return errors.New("version not match") + } + _, chainID, _ := assetstypes.ParseID(assetID) + sl := k.GetStakerList(ctx, assetID) + if len(sl.StakerAddrs) == 0 { + return errors.New("staker list is empty") + } + + store := ctx.KVStore(k.storeKey) + + for _, changeKV := range balanceChanges.NstBalanceChanges { + stakerAddr := sl.StakerAddrs[changeKV.StakerIndex] + key := types.NativeTokenStakerKey(assetID, stakerAddr) + value := store.Get(key) + if value == nil { + return errors.New("stakerInfo does not exist") + } + stakerInfo := &types.StakerInfo{} + k.cdc.MustUnmarshal(value, stakerInfo) + newBalance := types.BalanceInfo{} + if length := len(stakerInfo.BalanceList); length > 0 { + newBalance = *(stakerInfo.BalanceList[length-1]) + } + newBalance.Block = uint64(ctx.BlockHeight()) + // we set index as a global reference used through all rounds + newBalance.Index++ + newBalance.Change = types.Action_ACTION_SLASH_REFUND + newBalance.RoundID = roundID + balance := changeKV.Balance + + if delta := int64(balance) - newBalance.Balance; delta != 0 { + decimal, _, err := k.getDecimal(ctx, assetID) + if err != nil { + return err + } + if err := k.delegationKeeper.UpdateNSTBalance(ctx, getStakerID(stakerAddr, chainID), assetID, sdkmath.NewIntWithDecimal(delta, decimal)); err != nil { + return err + } + newBalance.Balance = int64(balance) + } + stakerInfo.Append(&newBalance) + bz := k.cdc.MustMarshal(stakerInfo) + store.Set(key, bz) + } return nil } diff --git a/x/oracle/keeper/post_aggregation.go b/x/oracle/keeper/post_aggregation.go index 2fd13edf2..caeaf07ee 100644 --- a/x/oracle/keeper/post_aggregation.go +++ 
b/x/oracle/keeper/post_aggregation.go @@ -1,21 +1,28 @@ package keeper -import "github.com/imua-xyz/imuachain/x/oracle/keeper/common" +import ( + "github.com/cosmos/cosmos-sdk/codec" + "github.com/imua-xyz/imuachain/x/oracle/keeper/common" +) // RegisterPostAggregation registers handler for tokenfeeder set with deterministic source which need to do some process with the deterministic aggregated result // this is used to register the post handlers served for some customer defined deterministic source oracle requirement -func (k *Keeper) RegisterPostAggregation() { +func (k Keeper) RegisterPostAggregation() { // k.BondPostAggregation(1, UpdateNSTBalanceChange) } -func (k *Keeper) BondPostAggregation(feederID int64, postHandler common.PostAggregationHandler) { +func (k Keeper) BondPostAggregation(feederID int64, postHandler common.PostAggregationHandler) { k.postHandlers[feederID] = postHandler } -func (k *Keeper) GetPostAggregation(feederID int64) (handler common.PostAggregationHandler, found bool) { +func (k Keeper) GetPostAggregation(feederID int64) (handler common.PostAggregationHandler, found bool) { if k.postHandlers == nil { return nil, false } handler, found = k.postHandlers[feederID] return } + +func (k Keeper) MustUnmarshal(bz []byte, ptr codec.ProtoMarshaler) { + k.cdc.MustUnmarshal(bz, ptr) +} diff --git a/x/oracle/keeper/prices.go b/x/oracle/keeper/prices.go index 0fdd1ccaf..3f786ba01 100644 --- a/x/oracle/keeper/prices.go +++ b/x/oracle/keeper/prices.go @@ -211,29 +211,29 @@ func (k Keeper) AppendPriceTR(ctx sdk.Context, tokenID uint64, priceTR types.Pri } k.IncreaseNextRoundID(ctx, tokenID) // skip post processing for nil deterministic ID - if detID == types.NilDetID { - return true - } - - // skip post processing for empty price - if len(priceTR.Price) == 0 { - return true - } - - if nstAssetID := p.GetAssetIDForNSTFromTokenID(tokenID); len(nstAssetID) > 0 { - nstVersion, err := getNSTVersionFromDetID(detID) - if err != nil || nstVersion == 0 { - 
logger.Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err, "nstVersion", nstVersion, "tokenID", tokenID, "roundID", nextRoundID) - return true - } - err = k.UpdateNSTByBalanceChange(ctx, nstAssetID, priceTR, nstVersion) - if err != nil { - // we just report this error in log to notify validators - logger.Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err) - } else { - logger.Info("updated balance change for NST") - } - } + // if detID == types.NilDetID { + // return true + // } + // + // // skip post processing for empty price + // if len(priceTR.Price) == 0 { + // return true + // } + // + // if nstAssetID := p.GetAssetIDForNSTFromTokenID(tokenID); len(nstAssetID) > 0 { + // nstVersion, err := getNSTVersionFromDetID(detID) + // if err != nil || nstVersion == 0 { + // logger.Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err, "nstVersion", nstVersion, "tokenID", tokenID, "roundID", nextRoundID) + // return true + // } + // err = k.UpdateNSTByBalanceChange(ctx, nstAssetID, priceTR, nstVersion) + // if err != nil { + // // we just report this error in log to notify validators + // logger.Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err) + // } else { + // logger.Info("updated balance change for NST") + // } + // } return true } diff --git a/x/oracle/keeper/two_phases.go b/x/oracle/keeper/two_phases.go index dbfc86255..132724391 100644 --- a/x/oracle/keeper/two_phases.go +++ b/x/oracle/keeper/two_phases.go @@ -2,26 +2,76 @@ package keeper import ( "fmt" + "sort" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/ethereum/go-ethereum/common" "github.com/imua-xyz/imuachain/x/oracle/types" ) // SetNextPieceIndex sets the next-piece-index of feederID for 'node-recovery' -func (k Keeper) SetNextPieceIndex(ctx sdk.Context, feederID uint64, pieceIndex uint32) { +// func (k Keeper) SetNextPieceIndexForFeeder(ctx sdk.Context, feederID uint64, pieceIndex uint32) { +// store := 
ctx.KVStore(k.storeKey) +// key := types.TwoPhasesFeederKey(feederID) +// store.Set(key, types.Uint32Bytes(pieceIndex)) +// } + +func (k Keeper) Setup2ndPhase(ctx sdk.Context, feederID uint64, validators []string, leafCount uint32, rootHash []byte) { + k.Setup2ndPhaseNextPieceIndex(ctx, feederID, validators) + k.SetFeederTreeInfo(ctx, feederID, leafCount, rootHash) +} + +// we group the validators by feederID instead of the opposite way because when we set up or clear, +// we do this for all validators under the feederID. When changes happen to a single validator, we enter "forceSeal," +// which removes all validators under that feederID. +// Therefore, we use feederID→[]{validators,index}, not validator→[]{feederID, index} or feederID/validator→index. +// While the latter approach would make "checkAndIncrease" faster when querying the index for a specific +// validator under a specific feederID, it trades off many I/O operations with memory iteration, which isn't optimal. +func (k Keeper) Setup2ndPhaseNextPieceIndex(ctx sdk.Context, feederID uint64, validators []string) { store := ctx.KVStore(k.storeKey) - key := types.TwoPhasesFeederKey(feederID) - store.Set(key, types.Uint32Bytes(pieceIndex)) + key := types.TwoPhasesFeederValidatorsKey(feederID) + validatorIndexList := make([]*types.ValidatorIndex, 0, len(validators)) + // set next piece index for all input validators to 0 under the input feederID + if len(validators) > 0 { + for _, validator := range validators { + validatorIndexList = append(validatorIndexList, &types.ValidatorIndex{Validator: validator, NextIndex: 0}) + } + bz := k.cdc.MustMarshal(&types.FeederValidatorsIndex{ + ValidatorIndexList: validatorIndexList, + }) + store.Set(key, bz) + } + // set next piece index for feederID to 0 + key = types.TwoPhasesFeederKey(feederID) + store.Set(key, types.Uint32Bytes(0)) } -func (k Keeper) ClearNextPieceIndex(ctx sdk.Context, feederID uint64) { +func (k Keeper) Clear2ndPhaseNextPieceIndex(ctx sdk.Context, 
feederID uint64) { store := ctx.KVStore(k.storeKey) key := types.TwoPhasesFeederKey(feederID) + // delete nextPieceIndex for feederID + store.Delete(key) + key = types.TwoPhasesFeederValidatorsKey(feederID) + // delete nextPieceIndex for all validators with feederID store.Delete(key) } +func (k Keeper) SetNextPieceIndexForFeeder(ctx sdk.Context, feederID uint64, nextPieceIndex uint32) { + store := ctx.KVStore(k.storeKey) + key := types.TwoPhasesFeederKey(feederID) + store.Set(key, types.Uint32Bytes(nextPieceIndex)) +} + +// func (k Keeper) ClearNextPieceIndexForFeeder(ctx sdk.Context, feederID uint64) { +// store := ctx.KVStore(k.storeKey) +// key := types.TwoPhasesFeederKey(feederID) +// store.Delete(key) +// } + +// TODO: remove this // NextPieceIndexByFeederID read directly from memory and return the next-piece-index of input feederID func (k Keeper) NextPieceIndexByFeederID(ctx sdk.Context, feederID uint64) (uint32, bool) { + // query from mem-cache return k.FeederManager.NextPieceIndexByFeederID(feederID) } @@ -37,57 +87,155 @@ func (k Keeper) CheckAndIncreaseNextPieceIndex(ctx sdk.Context, validator string return 0, fmt.Errorf("piece_index_check_failed: feederID:%d, max_piece_index:%d, got:%d", feederID, maxPieceIndex, nextPieceIndex) } store := ctx.KVStore(k.storeKey) - key := types.TwoPhasesValidatorPieceKey(validator, feederID) + // key := types.TwoPhasesValidatorPieceKey(validator, feederID) + key := types.TwoPhasesFeederValidatorsKey(feederID) bz := store.Get(key) if bz == nil { return 0, fmt.Errorf("piece_index_check_failed: validator_not_found: validator:%s, feeder_id:%d", validator, feederID) } - expectedPieceIndex := types.BytesToUint32(bz) - if nextPieceIndex != expectedPieceIndex { - return 0, fmt.Errorf("piece_index_check_failed: non_conseecutive: expected:%d, recived:%d", expectedPieceIndex, nextPieceIndex) + feederValidatorsIndex := &types.FeederValidatorsIndex{} + k.cdc.MustUnmarshal(bz, feederValidatorsIndex) + for _, validatorIndex := range 
feederValidatorsIndex.ValidatorIndexList { + if validatorIndex.Validator == validator { + if validatorIndex.NextIndex == nextPieceIndex { + validatorIndex.NextIndex++ + bz = k.cdc.MustMarshal(feederValidatorsIndex) + store.Set(key, bz) + return nextPieceIndex + 1, nil + } + return 0, fmt.Errorf("piece_index_check_failed: non_conseecutive: expected:%d, recived:%d", validatorIndex.NextIndex, nextPieceIndex) + } } - store.Set(key, types.Uint32Bytes(nextPieceIndex+1)) - return nextPieceIndex + 1, nil + return 0, fmt.Errorf("piece_index_check_failed: next_piece_index not found for valdiator:%s", validator) } -func (k Keeper) Setup2ndPhaseNextPieceIndex(ctx sdk.Context, feederID uint64, validators []string) { +// used for recovery, otherwise mem-cache are used directly for reading +func (k Keeper) SetRawDataPiece(ctx sdk.Context, feederID uint64, pieceIndex uint32, rawData []byte) { store := ctx.KVStore(k.storeKey) - // 1. set nextPieceIndex for feederID, first piece index is 0 - store.Set(types.TwoPhasesFeederKey(feederID), types.Uint32Bytes(0)) + key := types.TwoPhasesFeederRawDataKey(feederID, pieceIndex) + store.Set(key, rawData) +} - // 2. 
set nextPieceIndex for all activeValidators, fisr piece index is 0 - for _, validator := range validators { - store.Set(types.TwoPhasesValidatorPieceKey(validator, feederID), types.Uint32Bytes(0)) +func (k Keeper) GetRawDataPieces(ctx sdk.Context, feederID uint64) ([][]byte, error) { + store := ctx.KVStore(k.storeKey) + key := types.TwoPhasesFeederKey(feederID) + bz := store.Get(key) + if bz == nil { + return nil, nil } + nextPieceIndex := types.BytesToUint32(bz) + if nextPieceIndex == 0 { + return nil, nil + } + ret := make([][]byte, 0, nextPieceIndex) + for i := uint32(0); i < nextPieceIndex; i++ { + key = types.TwoPhasesFeederRawDataKey(feederID, i) + bz = store.Get(key) + if bz == nil { + // this should not happen, we got something wrong in db + return nil, fmt.Errorf("there's something wrong in db, miss piece:%d of rawData for feederID:%d", i, feederID) + } + ret = append(ret, bz) + } + return ret, nil } -func (k Keeper) Clear2ndPhaseNextPieceIndex(ctx sdk.Context, feederID uint64, validators []string) { - // TODO(leonz): implement me - // 1. remove feederID->nextPieceIndex, 2. remove validator/feederID->nextPieceIndex +func (k Keeper) SetFeederTreeInfo(ctx sdk.Context, feederID uint64, count uint32, rootHash []byte) { + if count == 0 || len(rootHash) != common.HashLength { + return + } store := ctx.KVStore(k.storeKey) - store.Delete(types.TwoPhasesFeederKey(feederID)) - // 2. 
remove nextPieceIndex for validators - for _, validator := range validators { - store.Delete(types.TwoPhasesValidatorPieceKey(validator, feederID)) + key := types.TwoPhaseFeederTreeInfoKey(feederID) + treeInfo := &types.TreeInfo{ + LeafCount: count, + RootHash: rootHash, } - + bz := k.cdc.MustMarshal(treeInfo) + store.Set(key, bz) } -// set feederID/pieceIndex -> rawData(]byte) - -// read feederID/pieceIndex -> rawData([]byte) - -// read all pieces of rawData from feederID/ - -// clear all rawData for feederID/ +func (k Keeper) GetFeederTreeInfo(ctx sdk.Context, feederID uint64) (uint32, []byte) { + store := ctx.KVStore(k.storeKey) + key := types.TwoPhaseFeederTreeInfoKey(feederID) + bz := store.Get(key) + if bz == nil { + return 0, nil + } + treeInfo := &types.TreeInfo{} + k.cdc.MustUnmarshal(bz, treeInfo) + return treeInfo.LeafCount, treeInfo.RootHash +} -// update feederID -> proof([][]byte) +// used for recovery, otherwise, mem-cache are used directly for reading +func (k Keeper) AddNodesToMerkleTree(ctx sdk.Context, feederID uint64, proof []*types.HashNode) { + if len(proof) == 0 { + return + } + store := ctx.KVStore(k.storeKey) + key := types.TwoPhasesFeederProofKey(feederID) + bz := store.Get(key) + merkle := &types.FlattenTree{} + k.cdc.MustUnmarshal(bz, merkle) + nodes := merkle.Nodes + sort.Slice(proof, func(i, j int) bool { return proof[i].Index < proof[j].Index }) + uniqueOrderedProof := make([]*types.HashNode, 0, len(proof)) + for i := 0; i < len(proof); i++ { + if i == 0 || proof[i].Index != proof[i-1].Index { + uniqueOrderedProof = append(uniqueOrderedProof, proof[i]) + } + } + l1 := len(nodes) + l2 := len(uniqueOrderedProof) + i := 0 + j := 0 + newList := make([]*types.HashNode, 0, l1+l2) + for i < l1 && j < l2 { + switch { + case nodes[i].Index == uniqueOrderedProof[j].Index: + newList = append(newList, nodes[i]) + i++ + j++ + case nodes[i].Index < uniqueOrderedProof[j].Index: + newList = append(newList, nodes[i]) + i++ + case nodes[i].Index > 
uniqueOrderedProof[j].Index: + newList = append(newList, proof[j]) + j++ + } + } + if i < l1 { + newList = append(newList, nodes[i:]...) + } else { + newList = append(newList, uniqueOrderedProof[j:]...) + } + merkle.Nodes = newList + bz = k.cdc.MustMarshal(merkle) + store.Set(key, bz) +} -// clear the whole proof under feederID +func (k Keeper) GetNodesFromMerkleTree(ctx sdk.Context, feederID uint64) []*types.HashNode { + store := ctx.KVStore(k.storeKey) + key := types.TwoPhasesFeederProofKey(feederID) + bz := store.Get(key) + if bz == nil { + return nil + } + mt := &types.FlattenTree{} + k.cdc.MustUnmarshal(bz, mt) + return mt.Nodes +} // clear feederID/, clear: // 1. rawData // 2. proof -func (k Keeper) Clear2ndPhases(ctx sdk.Context, feederID uint64) { - +func (k Keeper) Clear2ndPhase(ctx sdk.Context, feederID uint64, rootIndex uint32) { + store := ctx.KVStore(k.storeKey) + // clear rawData + for i := uint32(0); i <= rootIndex; i++ { + store.Delete(types.TwoPhasesFeederRawDataKey(feederID, i)) + } + // clear proof + store.Delete(types.TwoPhasesFeederProofKey(feederID)) + // clear indexex + k.Clear2ndPhaseNextPieceIndex(ctx, feederID) } diff --git a/x/oracle/types/key_two_phase.go b/x/oracle/types/key_two_phase.go index 1650f4ee9..05a7dadb2 100644 --- a/x/oracle/types/key_two_phase.go +++ b/x/oracle/types/key_two_phase.go @@ -1,9 +1,12 @@ package types const ( - TwoPhasesPrefix = "TwoPhases/" - FeederPrefix = TwoPhasesPrefix + "feeder/" - ValidatorPrefix = TwoPhasesPrefix + "validator/" + TwoPhasesPrefix = "TwoPhases/" + FeederPrefix = TwoPhasesPrefix + "feeder/" + FeederValidatorsPrefix = TwoPhasesPrefix + "validators/" + FeederRawDataPrefix = TwoPhasesPrefix + "rawData/" + FeederProofPrefix = TwoPhasesPrefix + "proof/" + FeederTreeInfoPrefix = TwoPhasesPrefix + "treeInfo/" ) func TwoPhasesKeyPrefix() []byte { @@ -14,8 +17,8 @@ func TwoPhasesFeederKeyPrefix() []byte { return []byte(FeederPrefix) } -func TwoPhasesValidatorKeyPrefix() []byte { - return 
[]byte(ValidatorPrefix) +func TwoPhasesFeederValidatorsKeyPrefix() []byte { + return []byte(FeederValidatorsPrefix) } func TwoPhasesFeederKey(feederID uint64) []byte { @@ -25,11 +28,46 @@ func TwoPhasesFeederKey(feederID uint64) []byte { return key } -func TwoPhasesValidatorPieceKey(validator string, feederID uint64) []byte { +func TwoPhasesFeederValidatorsKey(feederID uint64) []byte { var key []byte - key = append(key, ValidatorPrefix...) - key = append(key, []byte(validator)...) + key = append(key, FeederValidatorsPrefix...) + key = append(key, Uint64Bytes(feederID)...) + return key +} + +func TwoPhasesFeederRawDataKeyPrefix(feederID uint64) []byte { + var key []byte + key = append(key, FeederRawDataPrefix...) + key = append(key, DelimiterForCombinedKey) + key = append(key, Uint64Bytes(feederID)...) key = append(key, DelimiterForCombinedKey) + return key +} + +func TwoPhasesFeederRawDataKey(feederID uint64, index uint32) []byte { + var key []byte + key = append(key, TwoPhasesFeederRawDataKeyPrefix(feederID)...) + return append(key, Uint32Bytes(index)...) +} + +func TwoPhasesFeederProofKeyPrefix() []byte { + return []byte(FeederProofPrefix) +} + +func TwoPhasesFeederProofKey(feederID uint64) []byte { + var key []byte + key = append(key, FeederProofPrefix...) + key = append(key, Uint64Bytes(feederID)...) + return key +} + +func TwoPhaseFeederTreeInfoKeyPrefix() []byte { + return []byte(FeederTreeInfoPrefix) +} + +func TwoPhaseFeederTreeInfoKey(feederID uint64) []byte { + var key []byte + key = append(key, FeederTreeInfoPrefix...) key = append(key, Uint64Bytes(feederID)...) 
return key } diff --git a/x/oracle/types/merkletree.go b/x/oracle/types/merkletree.go index 1325533a7..8bc448fba 100644 --- a/x/oracle/types/merkletree.go +++ b/x/oracle/types/merkletree.go @@ -41,13 +41,20 @@ func (p *Proof) getHashByIndex(index uint32) []byte { return nil } -// hashNode represents a node including index and hash -type HashNode struct { - Index uint32 - Hash []byte +func (m *MerkleTree) SetRawDataPieces(pieces [][]byte) { + m.pieces = pieces +} + +func (m *MerkleTree) SetProofNodes(nodes []*HashNode) { + for _, node := range nodes { + n := m.t[node.Index] + if n == nil { + continue + } + n.hash = node.Hash + } } -// func (m *MerkleTree) getPathFromLeafIndex(index uint32) []uint32 { func (m *MerkleTree) ProofPathFromLeafIndex(index uint32) []uint32 { if index >= m.leafCount { return nil @@ -211,7 +218,7 @@ func (m *MerkleTree) PieceByIndex(targetIndex uint32) ([]byte, bool) { // return rawData as a whole, with true/false to tell if we got the completed rawData // when fasel, the returned first value should be nil func (m *MerkleTree) CompleteRawData() ([]byte, bool) { - if len(m.pieces) < int(m.leafCount) { + if m == nil || len(m.pieces) < int(m.leafCount) || m.leafCount == 0 { return nil, false } if len(m.rawData) > 0 { @@ -229,7 +236,11 @@ func (m *MerkleTree) CompleteRawData() ([]byte, bool) { // only when MerkleTree is set to non-zero leafCount with less amount of pieces than that leafCount we got false returned // so when the return value is false, it also indicates that this MerkleTree is collecting pieces func (m *MerkleTree) Completed() bool { - return len(m.pieces) == int(m.leafCount) + return m != nil && len(m.pieces) == int(m.leafCount) +} + +func (m *MerkleTree) CollectingRawData() bool { + return m != nil && len(m.pieces) < int(m.leafCount) } // (0, true) means the first leaf node is cached @@ -268,15 +279,15 @@ func (m *MerkleTree) RootHash() []byte { return m.root } +func (m *MerkleTree) RootIndex() uint32 { + return m.rootIndex +} + 
// NewMT new a merkle tree initialized with the topology from input pieceSize and totalSize -func NewMT(pieceSize, totalSize uint32, root []byte) *MerkleTree { - if totalSize == 0 { +func NewMT(pieceSize, leafCount uint32, root []byte) *MerkleTree { + if leafCount < 1 { return nil } - leafCount := totalSize / pieceSize - if totalSize%pieceSize > 0 { - leafCount++ - } originalLeafCount := leafCount ret := &MerkleTree{ diff --git a/x/oracle/types/message_create_price.go b/x/oracle/types/message_create_price.go index 0107f2707..31d666770 100644 --- a/x/oracle/types/message_create_price.go +++ b/x/oracle/types/message_create_price.go @@ -49,6 +49,10 @@ func (msg *MsgCreatePrice) ValidateBasic() error { return nil } +func (msg *MsgCreatePrice) IsNotTwoPhases() bool { + return msg.Phase == AggregationPhaseUnspecified +} + func (msg *MsgCreatePrice) IsPhaseOne() bool { return msg.Phase == AggregationPhaseOne } diff --git a/x/oracle/types/params.go b/x/oracle/types/params.go index cdce66c30..fade7b2ad 100644 --- a/x/oracle/types/params.go +++ b/x/oracle/types/params.go @@ -507,8 +507,14 @@ func (p Params) GetTokenIDFromAssetID(assetID string) int { return 0 } -func (p Params) GetAssetIDForNSTFromTokenID(tokenID uint64) string { - assetIDs := p.GetAssetIDsFromTokenID(tokenID) +func (p Params) GetAssetIDForNSTFromFeederID(feederID uint64) string { + tokenID := p.TokenFeeders[feederID].TokenID + + if tokenID >= uint64(len(p.Tokens)) { + return "" + } + assetIDs := strings.Split(p.Tokens[tokenID].AssetID, ",") + for _, assetID := range assetIDs { if nstChain, ok := strings.CutPrefix(strings.ToLower(assetID), NSTIDPrefix); ok { if NSTChain, ok := NSTChainsInverted[nstChain]; ok { @@ -519,13 +525,6 @@ func (p Params) GetAssetIDForNSTFromTokenID(tokenID uint64) string { return "" } -func (p Params) GetAssetIDsFromTokenID(tokenID uint64) []string { - if tokenID >= uint64(len(p.Tokens)) { - return nil - } - return strings.Split(p.Tokens[tokenID].AssetID, ",") -} - func (p Params) 
IsDeterministicSource(sourceID uint64) bool { return p.Sources[sourceID].Deterministic } diff --git a/x/oracle/types/rawdata_nst.pb.go b/x/oracle/types/rawdata_nst.pb.go index 5e1dbfa38..c852f06c1 100644 --- a/x/oracle/types/rawdata_nst.pb.go +++ b/x/oracle/types/rawdata_nst.pb.go @@ -27,7 +27,7 @@ type NSTKV struct { // staker index for a nst defined on imuachain side StakerIndex uint32 `protobuf:"varint,1,opt,name=staker_index,json=stakerIndex,proto3" json:"staker_index,omitempty"` // balance change since last update - BalanceChange int64 `protobuf:"varint,2,opt,name=balance_change,json=balanceChange,proto3" json:"balance_change,omitempty"` + Balance int64 `protobuf:"varint,2,opt,name=balance,proto3" json:"balance,omitempty"` } func (m *NSTKV) Reset() { *m = NSTKV{} } @@ -70,17 +70,18 @@ func (m *NSTKV) GetStakerIndex() uint32 { return 0 } -func (m *NSTKV) GetBalanceChange() int64 { +func (m *NSTKV) GetBalance() int64 { if m != nil { - return m.BalanceChange + return m.Balance } return 0 } // RawDataNST represents balance changes of all stakers for a NST type RawDataNST struct { + Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // NSTKV use array to describe {staker_indx: balance_change} for all stakers whose balance had changed - NstBalanceChanges []*NSTKV `protobuf:"bytes,1,rep,name=nst_balance_changes,json=nstBalanceChanges,proto3" json:"nst_balance_changes,omitempty"` + NstBalanceChanges []*NSTKV `protobuf:"bytes,2,rep,name=nst_balance_changes,json=nstBalanceChanges,proto3" json:"nst_balance_changes,omitempty"` } func (m *RawDataNST) Reset() { *m = RawDataNST{} } @@ -116,6 +117,13 @@ func (m *RawDataNST) XXX_DiscardUnknown() { var xxx_messageInfo_RawDataNST proto.InternalMessageInfo +func (m *RawDataNST) GetVersion() uint64 { + if m != nil { + return m.Version + } + return 0 +} + func (m *RawDataNST) GetNstBalanceChanges() []*NSTKV { if m != nil { return m.NstBalanceChanges @@ -133,23 +141,24 @@ func init() { } var 
fileDescriptor_77c72bbd82fbb1be = []byte{ - // 254 bytes of a gzipped FileDescriptorProto + // 267 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xcd, 0xcc, 0x2d, 0x4d, 0x4c, 0xce, 0x48, 0xcc, 0xcc, 0xd3, 0xcf, 0x2f, 0x4a, 0x4c, 0xce, 0x49, 0xd5, 0x2f, 0x33, 0xd4, 0x2f, 0x4a, 0x2c, 0x4f, 0x49, 0x2c, 0x49, 0x8c, 0xcf, 0x2b, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x12, 0x86, 0x2b, 0xd3, 0x83, 0x28, 0xd3, 0x2b, 0x33, 0x54, 0x0a, 0xe4, 0x62, 0xf5, + 0xc9, 0x17, 0x12, 0x86, 0x2b, 0xd3, 0x83, 0x28, 0xd3, 0x2b, 0x33, 0x54, 0x72, 0xe1, 0x62, 0xf5, 0x0b, 0x0e, 0xf1, 0x0e, 0x13, 0x52, 0xe4, 0xe2, 0x29, 0x2e, 0x49, 0xcc, 0x4e, 0x2d, 0x8a, 0xcf, 0xcc, 0x4b, 0x49, 0xad, 0x90, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x86, 0x88, 0x79, 0x82, - 0x84, 0x84, 0x54, 0xb9, 0xf8, 0x92, 0x12, 0x73, 0x12, 0xf3, 0x92, 0x53, 0xe3, 0x93, 0x33, 0x12, - 0xf3, 0xd2, 0x53, 0x25, 0x98, 0x14, 0x18, 0x35, 0x98, 0x83, 0x78, 0xa1, 0xa2, 0xce, 0x60, 0x41, - 0xa5, 0x08, 0x2e, 0xae, 0xa0, 0xc4, 0x72, 0x97, 0xc4, 0x92, 0x44, 0xbf, 0xe0, 0x10, 0x21, 0x2f, - 0x2e, 0xe1, 0xbc, 0xe2, 0x92, 0x78, 0x54, 0x8d, 0xc5, 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, - 0x52, 0x7a, 0x58, 0xdc, 0xa4, 0x07, 0x76, 0x50, 0x90, 0x60, 0x5e, 0x71, 0x89, 0x13, 0xb2, 0xc1, - 0xc5, 0x4e, 0x6e, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, - 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0xa5, 0x93, 0x9e, - 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x0f, 0x32, 0x52, 0xb7, 0xa2, 0xb2, 0x4a, - 0x1f, 0x11, 0x2c, 0x15, 0xb0, 0x80, 0x29, 0xa9, 0x2c, 0x48, 0x2d, 0x4e, 0x62, 0x03, 0x07, 0x88, - 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x28, 0x64, 0x99, 0x8c, 0x39, 0x01, 0x00, 0x00, + 0x84, 0x84, 0x24, 0xb8, 0xd8, 0x93, 0x12, 0x73, 0x12, 0xf3, 0x92, 0x53, 0x25, 0x98, 0x14, 0x18, + 0x35, 0x98, 0x83, 0x60, 0x5c, 0xa5, 0x22, 0x2e, 0xae, 0xa0, 0xc4, 0x72, 0x97, 0xc4, 0x92, 0x44, + 0xbf, 0xe0, 0x10, 
0x90, 0xba, 0xb2, 0xd4, 0xa2, 0xe2, 0xcc, 0xfc, 0x3c, 0xb0, 0x29, 0x2c, 0x41, + 0x30, 0xae, 0x90, 0x17, 0x97, 0x70, 0x5e, 0x71, 0x49, 0x3c, 0x54, 0x5b, 0x7c, 0x72, 0x46, 0x62, + 0x5e, 0x7a, 0x6a, 0xb1, 0x04, 0x93, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x94, 0x1e, 0x16, 0x07, 0xea, + 0x81, 0x5d, 0x17, 0x24, 0x98, 0x57, 0x5c, 0xe2, 0x04, 0xd1, 0xe5, 0x0c, 0xd1, 0xe4, 0xe4, 0x76, + 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, + 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0x3a, 0xe9, 0x99, 0x25, 0x19, 0xa5, + 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x20, 0x23, 0x75, 0x2b, 0x2a, 0xab, 0xf4, 0x11, 0x61, 0x54, + 0x01, 0x0b, 0xa5, 0x92, 0xca, 0x82, 0xd4, 0xe2, 0x24, 0x36, 0x70, 0xe8, 0x18, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x19, 0xc4, 0xa6, 0x05, 0x46, 0x01, 0x00, 0x00, } func (m *NSTKV) Marshal() (dAtA []byte, err error) { @@ -172,8 +181,8 @@ func (m *NSTKV) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.BalanceChange != 0 { - i = encodeVarintRawdataNst(dAtA, i, uint64(m.BalanceChange)) + if m.Balance != 0 { + i = encodeVarintRawdataNst(dAtA, i, uint64(m.Balance)) i-- dAtA[i] = 0x10 } @@ -216,9 +225,14 @@ func (m *RawDataNST) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintRawdataNst(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 } } + if m.Version != 0 { + i = encodeVarintRawdataNst(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } @@ -242,8 +256,8 @@ func (m *NSTKV) Size() (n int) { if m.StakerIndex != 0 { n += 1 + sovRawdataNst(uint64(m.StakerIndex)) } - if m.BalanceChange != 0 { - n += 1 + sovRawdataNst(uint64(m.BalanceChange)) + if m.Balance != 0 { + n += 1 + sovRawdataNst(uint64(m.Balance)) } return n } @@ -254,6 +268,9 @@ func (m *RawDataNST) Size() (n int) { } var l int _ = l + if m.Version != 0 { + n += 1 + sovRawdataNst(uint64(m.Version)) + } if len(m.NstBalanceChanges) > 0 { for _, e := range 
m.NstBalanceChanges { l = e.Size() @@ -319,9 +336,9 @@ func (m *NSTKV) Unmarshal(dAtA []byte) error { } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BalanceChange", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) } - m.BalanceChange = 0 + m.Balance = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRawdataNst @@ -331,7 +348,7 @@ func (m *NSTKV) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BalanceChange |= int64(b&0x7F) << shift + m.Balance |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -387,6 +404,25 @@ func (m *RawDataNST) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRawdataNst + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field NstBalanceChanges", wireType) } diff --git a/x/oracle/types/two_phases.pb.go b/x/oracle/types/two_phases.pb.go new file mode 100644 index 000000000..1d579da07 --- /dev/null +++ b/x/oracle/types/two_phases.pb.go @@ -0,0 +1,1145 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: imuachain/oracle/v1/two_phases.proto + +package types + +import ( + fmt "fmt" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Nonce is a message that contains a nonce for a feeder +type ValidatorIndex struct { + // FeederID is the ID of the feeder that corresponding to the nonce + Validator string `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator,omitempty"` + // value is the nonce value + NextIndex uint32 `protobuf:"varint,2,opt,name=next_index,json=nextIndex,proto3" json:"next_index,omitempty"` +} + +func (m *ValidatorIndex) Reset() { *m = ValidatorIndex{} } +func (m *ValidatorIndex) String() string { return proto.CompactTextString(m) } +func (*ValidatorIndex) ProtoMessage() {} +func (*ValidatorIndex) Descriptor() ([]byte, []int) { + return fileDescriptor_0762117ab9aef571, []int{0} +} +func (m *ValidatorIndex) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorIndex.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorIndex) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorIndex.Merge(m, src) +} +func (m *ValidatorIndex) XXX_Size() int { + return m.Size() +} +func (m *ValidatorIndex) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorIndex.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorIndex proto.InternalMessageInfo + +func (m *ValidatorIndex) GetValidator() string { + if m != nil { + return m.Validator + } + return "" +} + +func (m *ValidatorIndex) GetNextIndex() uint32 { + if m != nil { + return m.NextIndex + } + return 0 +} + +// ValidatorNonce is a message that contains the nonces for a validator +type FeederValidatorsIndex struct { + // nonces is the list of nonces for the feeders + 
ValidatorIndexList []*ValidatorIndex `protobuf:"bytes,2,rep,name=validator_index_list,json=validatorIndexList,proto3" json:"validator_index_list,omitempty"` +} + +func (m *FeederValidatorsIndex) Reset() { *m = FeederValidatorsIndex{} } +func (m *FeederValidatorsIndex) String() string { return proto.CompactTextString(m) } +func (*FeederValidatorsIndex) ProtoMessage() {} +func (*FeederValidatorsIndex) Descriptor() ([]byte, []int) { + return fileDescriptor_0762117ab9aef571, []int{1} +} +func (m *FeederValidatorsIndex) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FeederValidatorsIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FeederValidatorsIndex.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FeederValidatorsIndex) XXX_Merge(src proto.Message) { + xxx_messageInfo_FeederValidatorsIndex.Merge(m, src) +} +func (m *FeederValidatorsIndex) XXX_Size() int { + return m.Size() +} +func (m *FeederValidatorsIndex) XXX_DiscardUnknown() { + xxx_messageInfo_FeederValidatorsIndex.DiscardUnknown(m) +} + +var xxx_messageInfo_FeederValidatorsIndex proto.InternalMessageInfo + +func (m *FeederValidatorsIndex) GetValidatorIndexList() []*ValidatorIndex { + if m != nil { + return m.ValidatorIndexList + } + return nil +} + +type HashNode struct { + Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *HashNode) Reset() { *m = HashNode{} } +func (m *HashNode) String() string { return proto.CompactTextString(m) } +func (*HashNode) ProtoMessage() {} +func (*HashNode) Descriptor() ([]byte, []int) { + return fileDescriptor_0762117ab9aef571, []int{2} +} +func (m *HashNode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HashNode) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HashNode.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HashNode) XXX_Merge(src proto.Message) { + xxx_messageInfo_HashNode.Merge(m, src) +} +func (m *HashNode) XXX_Size() int { + return m.Size() +} +func (m *HashNode) XXX_DiscardUnknown() { + xxx_messageInfo_HashNode.DiscardUnknown(m) +} + +var xxx_messageInfo_HashNode proto.InternalMessageInfo + +func (m *HashNode) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *HashNode) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +// Proof represents all hash nodes of a Mekle tree with indexes +type FlattenTree struct { + Nodes []*HashNode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` +} + +func (m *FlattenTree) Reset() { *m = FlattenTree{} } +func (m *FlattenTree) String() string { return proto.CompactTextString(m) } +func (*FlattenTree) ProtoMessage() {} +func (*FlattenTree) Descriptor() ([]byte, []int) { + return fileDescriptor_0762117ab9aef571, []int{3} +} +func (m *FlattenTree) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlattenTree) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FlattenTree.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FlattenTree) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlattenTree.Merge(m, src) +} +func (m *FlattenTree) XXX_Size() int { + return m.Size() +} +func (m *FlattenTree) XXX_DiscardUnknown() { + xxx_messageInfo_FlattenTree.DiscardUnknown(m) +} + +var xxx_messageInfo_FlattenTree proto.InternalMessageInfo + +func (m *FlattenTree) GetNodes() []*HashNode { + if m != nil { + return m.Nodes 
+ } + return nil +} + +type TreeInfo struct { + LeafCount uint32 `protobuf:"varint,1,opt,name=leaf_count,json=leafCount,proto3" json:"leaf_count,omitempty"` + RootHash []byte `protobuf:"bytes,2,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` +} + +func (m *TreeInfo) Reset() { *m = TreeInfo{} } +func (m *TreeInfo) String() string { return proto.CompactTextString(m) } +func (*TreeInfo) ProtoMessage() {} +func (*TreeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_0762117ab9aef571, []int{4} +} +func (m *TreeInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TreeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TreeInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TreeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_TreeInfo.Merge(m, src) +} +func (m *TreeInfo) XXX_Size() int { + return m.Size() +} +func (m *TreeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_TreeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_TreeInfo proto.InternalMessageInfo + +func (m *TreeInfo) GetLeafCount() uint32 { + if m != nil { + return m.LeafCount + } + return 0 +} + +func (m *TreeInfo) GetRootHash() []byte { + if m != nil { + return m.RootHash + } + return nil +} + +func init() { + proto.RegisterType((*ValidatorIndex)(nil), "imuachain.oracle.v1.ValidatorIndex") + proto.RegisterType((*FeederValidatorsIndex)(nil), "imuachain.oracle.v1.FeederValidatorsIndex") + proto.RegisterType((*HashNode)(nil), "imuachain.oracle.v1.HashNode") + proto.RegisterType((*FlattenTree)(nil), "imuachain.oracle.v1.FlattenTree") + proto.RegisterType((*TreeInfo)(nil), "imuachain.oracle.v1.TreeInfo") +} + +func init() { + proto.RegisterFile("imuachain/oracle/v1/two_phases.proto", fileDescriptor_0762117ab9aef571) +} + +var fileDescriptor_0762117ab9aef571 = []byte{ + // 352 
bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xc1, 0x4e, 0xf2, 0x40, + 0x14, 0x85, 0x19, 0xfe, 0x1f, 0x43, 0x07, 0x75, 0x31, 0x62, 0xd2, 0x44, 0x69, 0x48, 0x75, 0xc1, + 0x42, 0xdb, 0x20, 0x3e, 0x01, 0x26, 0x8d, 0x24, 0xea, 0xa2, 0x51, 0x17, 0x6e, 0x9a, 0xa1, 0xbd, + 0xd8, 0x49, 0xca, 0x0c, 0xe9, 0x0c, 0xb5, 0xf8, 0x14, 0x3e, 0x96, 0x4b, 0x96, 0x2e, 0x0d, 0xbc, + 0x88, 0x99, 0x56, 0x40, 0x12, 0x76, 0x73, 0x4f, 0xce, 0x3d, 0xf7, 0xcb, 0x1c, 0x7c, 0xce, 0xc6, + 0x53, 0x1a, 0xc6, 0x94, 0x71, 0x57, 0xa4, 0x34, 0x4c, 0xc0, 0xcd, 0xba, 0xae, 0x7a, 0x13, 0xc1, + 0x24, 0xa6, 0x12, 0xa4, 0x33, 0x49, 0x85, 0x12, 0xe4, 0x68, 0xed, 0x72, 0x4a, 0x97, 0x93, 0x75, + 0xed, 0x7b, 0x7c, 0xf8, 0x4c, 0x13, 0x16, 0x51, 0x25, 0xd2, 0x01, 0x8f, 0x20, 0x27, 0xa7, 0xd8, + 0xc8, 0x56, 0x8a, 0x89, 0xda, 0xa8, 0x63, 0xf8, 0x1b, 0x81, 0xb4, 0x30, 0xe6, 0x90, 0xab, 0x80, + 0x69, 0xaf, 0x59, 0x6d, 0xa3, 0xce, 0x81, 0x6f, 0x68, 0xa5, 0x58, 0xb6, 0x39, 0x3e, 0xf6, 0x00, + 0x22, 0x48, 0xd7, 0xa1, 0xb2, 0x4c, 0x7d, 0xc2, 0xcd, 0x75, 0x48, 0xb9, 0x1c, 0x24, 0x4c, 0x2a, + 0xb3, 0xda, 0xfe, 0xd7, 0x69, 0x5c, 0x9d, 0x39, 0x3b, 0xd8, 0x9c, 0x6d, 0x30, 0x9f, 0x64, 0x5b, + 0xf3, 0x1d, 0x93, 0xca, 0xbe, 0xc6, 0xf5, 0x5b, 0x2a, 0xe3, 0x07, 0x11, 0x01, 0x69, 0xe2, 0x5a, + 0x49, 0x85, 0x0a, 0xaa, 0x72, 0x20, 0x04, 0xff, 0x8f, 0xa9, 0x8c, 0x0b, 0xd4, 0x7d, 0xbf, 0x78, + 0xdb, 0x7d, 0xdc, 0xf0, 0x12, 0xaa, 0x14, 0xf0, 0xc7, 0x14, 0x80, 0xf4, 0x70, 0x8d, 0x8b, 0x08, + 0xa4, 0x89, 0x0a, 0x98, 0xd6, 0x4e, 0x98, 0xd5, 0x19, 0xbf, 0xf4, 0xda, 0x1e, 0xae, 0xeb, 0xe5, + 0x01, 0x1f, 0x09, 0xfd, 0x29, 0x09, 0xd0, 0x51, 0x10, 0x8a, 0x29, 0x57, 0xbf, 0xe7, 0x0d, 0xad, + 0xdc, 0x68, 0x81, 0x9c, 0x60, 0x23, 0x15, 0x42, 0x05, 0x7f, 0x38, 0xea, 0x5a, 0xd0, 0x91, 0x7d, + 0xef, 0x73, 0x61, 0xa1, 0xf9, 0xc2, 0x42, 0xdf, 0x0b, 0x0b, 0x7d, 0x2c, 0xad, 0xca, 0x7c, 0x69, + 0x55, 0xbe, 0x96, 0x56, 0xe5, 0xe5, 0xe2, 0x95, 0xa9, 0x78, 0x3a, 0x74, 0x42, 0x31, 0x76, 0x35, 
+ 0xd1, 0x65, 0x3e, 0x7b, 0x77, 0x37, 0x4d, 0xe7, 0xab, 0xae, 0xd5, 0x6c, 0x02, 0x72, 0xb8, 0x57, + 0x94, 0xdc, 0xfb, 0x09, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x9f, 0x5c, 0x2f, 0x0c, 0x02, 0x00, 0x00, +} + +func (m *ValidatorIndex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorIndex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NextIndex != 0 { + i = encodeVarintTwoPhases(dAtA, i, uint64(m.NextIndex)) + i-- + dAtA[i] = 0x10 + } + if len(m.Validator) > 0 { + i -= len(m.Validator) + copy(dAtA[i:], m.Validator) + i = encodeVarintTwoPhases(dAtA, i, uint64(len(m.Validator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FeederValidatorsIndex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FeederValidatorsIndex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FeederValidatorsIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ValidatorIndexList) > 0 { + for iNdEx := len(m.ValidatorIndexList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorIndexList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTwoPhases(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *HashNode) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + 
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashNode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HashNode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTwoPhases(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x12 + } + if m.Index != 0 { + i = encodeVarintTwoPhases(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FlattenTree) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlattenTree) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlattenTree) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Nodes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTwoPhases(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TreeInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TreeInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TreeInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RootHash) > 0 { + i -= len(m.RootHash) + copy(dAtA[i:], m.RootHash) + i = encodeVarintTwoPhases(dAtA, i, uint64(len(m.RootHash))) + i-- + dAtA[i] = 0x12 
+ } + if m.LeafCount != 0 { + i = encodeVarintTwoPhases(dAtA, i, uint64(m.LeafCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTwoPhases(dAtA []byte, offset int, v uint64) int { + offset -= sovTwoPhases(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ValidatorIndex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Validator) + if l > 0 { + n += 1 + l + sovTwoPhases(uint64(l)) + } + if m.NextIndex != 0 { + n += 1 + sovTwoPhases(uint64(m.NextIndex)) + } + return n +} + +func (m *FeederValidatorsIndex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ValidatorIndexList) > 0 { + for _, e := range m.ValidatorIndexList { + l = e.Size() + n += 1 + l + sovTwoPhases(uint64(l)) + } + } + return n +} + +func (m *HashNode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovTwoPhases(uint64(m.Index)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTwoPhases(uint64(l)) + } + return n +} + +func (m *FlattenTree) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + l = e.Size() + n += 1 + l + sovTwoPhases(uint64(l)) + } + } + return n +} + +func (m *TreeInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LeafCount != 0 { + n += 1 + sovTwoPhases(uint64(m.LeafCount)) + } + l = len(m.RootHash) + if l > 0 { + n += 1 + l + sovTwoPhases(uint64(l)) + } + return n +} + +func sovTwoPhases(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTwoPhases(x uint64) (n int) { + return sovTwoPhases(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ValidatorIndex) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorIndex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorIndex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTwoPhases + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTwoPhases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextIndex", wireType) + } + m.NextIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTwoPhases(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTwoPhases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FeederValidatorsIndex) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FeederValidatorsIndex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FeederValidatorsIndex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorIndexList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTwoPhases + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTwoPhases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorIndexList = append(m.ValidatorIndexList, &ValidatorIndex{}) + if err := m.ValidatorIndexList[len(m.ValidatorIndexList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTwoPhases(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTwoPhases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashNode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashNode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashNode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTwoPhases + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTwoPhases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTwoPhases(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTwoPhases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlattenTree) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlattenTree: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlattenTree: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTwoPhases + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTwoPhases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, &HashNode{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTwoPhases(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthTwoPhases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TreeInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TreeInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TreeInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeafCount", wireType) + } + m.LeafCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LeafCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTwoPhases + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTwoPhases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootHash = append(m.RootHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RootHash == nil { + m.RootHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTwoPhases(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTwoPhases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTwoPhases(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTwoPhases + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTwoPhases + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTwoPhases + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTwoPhases = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTwoPhases = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTwoPhases = 
fmt.Errorf("proto: unexpected end of group") +)