From 44c6f77801fd5a359f55862817a26afe0d28a234 Mon Sep 17 00:00:00 2001 From: leonz789 Date: Mon, 24 Feb 2025 10:04:53 +0800 Subject: [PATCH 1/2] feat: introduce 2-phases aggregation --- app/ante/cosmos/context.go | 2 +- app/ante/cosmos/fees.go | 10 +- app/ante/cosmos/min_price.go | 2 +- app/ante/cosmos/oracle_two_phases.go | 42 ++ app/ante/cosmos/sigverify.go | 45 +- app/ante/cosmos/txsize_gas.go | 9 +- app/ante/evm/fee_market.go | 2 +- app/ante/handler_options.go | 1 + app/ante/utils/interfaces.go | 6 + app/ante/utils/oracle.go | 27 +- app/app.go | 17 +- app/mempool.go | 280 ++++++++++ app/proposal_handler.go | 1 + client/docs/swagger-ui/swagger.json | 40 ++ proto/imuachain/oracle/v1/params.proto | 3 + proto/imuachain/oracle/v1/rawdata_nst.proto | 19 + proto/imuachain/oracle/v1/tx.proto | 16 +- x/assets/types/keys.go | 2 +- x/oracle/keeper/abci.go | 24 + x/oracle/keeper/common/two_phases.go | 8 + x/oracle/keeper/feedermanagement/caches.go | 69 ++- .../keeper/feedermanagement/feedermanager.go | 99 +++- .../feedermanager_two_phases.go | 140 +++++ .../keeper/feedermanagement/helper_test.go | 4 +- x/oracle/keeper/feedermanagement/round.go | 32 +- x/oracle/keeper/feedermanagement/types.go | 12 + x/oracle/keeper/keeper.go | 14 +- x/oracle/keeper/msg_server_price_feed.go | 14 +- x/oracle/keeper/nonce.go | 4 +- ...ative_token.go => nst_post_aggregation.go} | 6 + x/oracle/keeper/post_aggregation.go | 21 + x/oracle/keeper/two_phases.go | 93 ++++ x/oracle/module_beginblock.go | 4 +- x/oracle/types/codec.go | 2 + x/oracle/types/key_two_phase.go | 35 ++ x/oracle/types/merkletree.go | 384 +++++++++++++ x/oracle/types/merkletree_test.go | 82 +++ x/oracle/types/message_create_price.go | 24 + x/oracle/types/params.go | 13 + x/oracle/types/params.pb.go | 135 +++-- x/oracle/types/rawdata_nst.pb.go | 527 ++++++++++++++++++ x/oracle/types/tx.pb.go | 148 +++-- x/oracle/types/types.go | 74 ++- 43 files changed, 2348 insertions(+), 144 deletions(-) create mode 100644 
app/ante/cosmos/oracle_two_phases.go create mode 100644 app/mempool.go create mode 100644 app/proposal_handler.go create mode 100644 proto/imuachain/oracle/v1/rawdata_nst.proto create mode 100644 x/oracle/keeper/abci.go create mode 100644 x/oracle/keeper/common/two_phases.go create mode 100644 x/oracle/keeper/feedermanagement/feedermanager_two_phases.go rename x/oracle/keeper/{native_token.go => nst_post_aggregation.go} (98%) create mode 100644 x/oracle/keeper/post_aggregation.go create mode 100644 x/oracle/keeper/two_phases.go create mode 100644 x/oracle/types/key_two_phase.go create mode 100644 x/oracle/types/merkletree.go create mode 100644 x/oracle/types/merkletree_test.go create mode 100644 x/oracle/types/rawdata_nst.pb.go diff --git a/app/ante/cosmos/context.go b/app/ante/cosmos/context.go index 69e5bd27b..dd9b5d4c4 100644 --- a/app/ante/cosmos/context.go +++ b/app/ante/cosmos/context.go @@ -41,7 +41,7 @@ func (sud SetUpContextDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate // If the transaction is an OracleCreatePriceTx, we set the gas meter to infinite // NOTE: this works fine with cosmossdk-noopmempool+tendermint-mempool, since tenderminit used gasMeter.Limit() to check the gaswanted of tx insteand of tx.GetGas(), but if works with application mempool, we need to be caution of the tx.GetGas() since oracle create-price tx doesn't really pay. 
- if anteutils.IsOracleCreatePriceTx(tx) { + if _, ok, _ := anteutils.OracleCreatePriceTx(tx); ok { newCtx = ctx.WithGasMeter(evmostypes.NewInfiniteGasMeterWithLimit(0)) return next(newCtx, tx, simulate) } diff --git a/app/ante/cosmos/fees.go b/app/ante/cosmos/fees.go index 3f614b3db..d545de0e1 100644 --- a/app/ante/cosmos/fees.go +++ b/app/ante/cosmos/fees.go @@ -59,8 +59,14 @@ func (dfd DeductFeeDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bo return ctx, errorsmod.Wrap(errortypes.ErrTxDecode, "Tx must be a FeeTx") } - if anteutils.IsOracleCreatePriceTx(tx) { - newCtx := ctx.WithPriority(math.MaxInt64) + if _, isOracle, isRawData := anteutils.OracleCreatePriceTx(tx); isOracle { + var newCtx sdk.Context + if isRawData { + // set lowest priority for rawData tx; we include rawdata txs from the imua-mempool to make sure that big raw data pieces don't take all the space in a block, and the 'delivered' tx will be removed from the consensus-mempool by 'recheckTx' after commit + newCtx = ctx.WithPriority(math.MinInt64) + } else { + newCtx = ctx.WithPriority(math.MaxInt64) + } return next(newCtx, tx, simulate) } diff --git a/app/ante/cosmos/min_price.go b/app/ante/cosmos/min_price.go index 1a2b895f5..814ba312c 100644 --- a/app/ante/cosmos/min_price.go +++ b/app/ante/cosmos/min_price.go @@ -32,7 +32,7 @@ func (mpd MinGasPriceDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate return ctx, errorsmod.Wrapf(errortypes.ErrInvalidType, "invalid transaction type %T, expected sdk.FeeTx", tx) } - if anteutils.IsOracleCreatePriceTx(tx) { + if _, ok, _ := anteutils.OracleCreatePriceTx(tx); ok { return next(ctx, tx, simulate) } diff --git a/app/ante/cosmos/oracle_two_phases.go b/app/ante/cosmos/oracle_two_phases.go new file mode 100644 index 000000000..586759b9a --- /dev/null +++ b/app/ante/cosmos/oracle_two_phases.go @@ -0,0 +1,42 @@ +package cosmos + +import ( + "errors" + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/imua-xyz/imuachain/app/ante/utils"
+) + +type OracleTwoPhasesDecorator struct { + ok utils.OracleKeeper +} + +func NewOracleTwoPhasesDecorator(oracleKeeper utils.OracleKeeper) OracleTwoPhasesDecorator { + return OracleTwoPhasesDecorator{ + ok: oracleKeeper, + } +} + +func (otpd OracleTwoPhasesDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (sdk.Context, error) { + msgs, _, isRawData := utils.OracleCreatePriceTx(tx) + if isRawData { + pieceWithProof, ok := otpd.ok.GetPieceWithProof(msgs[0]) + // valid failed when getting pieceWithProof + if !ok { + return ctx, errors.New("failed to valid and get pieceWithProof with a tx with oracle rawData") + } + proofPath := otpd.ok.MinimalProofPathByIndex(msgs[0].FeederID, uint32(pieceWithProof.Index)) + if len(proofPath) != int(pieceWithProof.ProofSize()) { + return ctx, fmt.Errorf("rawData proofPath size not match, expected:%d, got:%d", len(proofPath), pieceWithProof.ProofSize()) + } + // the proofPath need to be exactly the same of both value and order + for i, index := range proofPath { + if pieceWithProof.Proof[i].Index != index { + return ctx, fmt.Errorf("rawData proofPath didn't include necessary index on position:%d of path:%d", i, index) + } + } + } + + return next(ctx, tx, simulate) +} diff --git a/app/ante/cosmos/sigverify.go b/app/ante/cosmos/sigverify.go index 1628bbb89..a759fd044 100644 --- a/app/ante/cosmos/sigverify.go +++ b/app/ante/cosmos/sigverify.go @@ -6,6 +6,7 @@ import ( "encoding/hex" "errors" "fmt" + "strconv" "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" kmultisig "github.com/cosmos/cosmos-sdk/crypto/keys/multisig" @@ -21,7 +22,6 @@ import ( authsigning "github.com/cosmos/cosmos-sdk/x/auth/signing" "github.com/cosmos/cosmos-sdk/x/auth/types" "github.com/imua-xyz/imuachain/app/ante/utils" - oracletypes "github.com/imua-xyz/imuachain/x/oracle/types" ) var ( @@ -59,7 +59,7 @@ func NewSetPubKeyDecorator(ak authante.AccountKeeper) SetPubKeyDecorator { func (spkd SetPubKeyDecorator) AnteHandle(ctx 
sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (sdk.Context, error) { // skip publickkey set for oracle create-price message - if utils.IsOracleCreatePriceTx(tx) { + if _, ok, _ := utils.OracleCreatePriceTx(tx); ok { sigTx, ok := tx.(authsigning.SigVerifiableTx) if !ok { return ctx, sdkerrors.ErrTxDecode.Wrap("invalid transaction type, expected SigVerifiableTx") @@ -173,7 +173,7 @@ func (sgcd SigGasConsumeDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simula return ctx, sdkerrors.ErrTxDecode.Wrap("invalid transaction type, expected SigVerifiableTx") } - if utils.IsOracleCreatePriceTx(tx) { + if _, isOracle, _ := utils.OracleCreatePriceTx(tx); isOracle { return next(ctx, tx, simulate) } @@ -258,7 +258,7 @@ func OnlyLegacyAminoSigners(sigData signing.SignatureData) bool { } func (svd SigVerificationDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (newCtx sdk.Context, err error) { - if utils.IsOracleCreatePriceTx(tx) { + if _, ok, _ := utils.OracleCreatePriceTx(tx); ok { sigTx, ok := tx.(authsigning.SigVerifiableTx) if !ok { return ctx, sdkerrors.ErrTxDecode.Wrap("invalid transaction type, expected SigVerifiableTx") @@ -386,9 +386,40 @@ func NewIncrementSequenceDecorator(ak authante.AccountKeeper, oracleKeeper utils func (isd IncrementSequenceDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (sdk.Context, error) { // oracle create-price message dont need to increment sequence, check its nonce instead - if utils.IsOracleCreatePriceTx(tx) { - for _, msg := range tx.GetMsgs() { - msg := msg.(*oracletypes.MsgCreatePrice) + if msgs, isOracle, isRawData := utils.OracleCreatePriceTx(tx); isOracle { + msg := msgs[0] + if isRawData { + // TODO(leonz): move this to ValidateBasic + if len(msg.Prices) != 1 || len(msg.Prices[0].Prices) == 0 { + return ctx, errors.New("invalid raw data price format") + } + pieceIndexTmp, err := strconv.ParseUint(msg.Prices[0].Prices[0].DetID, 10, 32) + 
pieceIndex := uint32(pieceIndexTmp) + if err != nil { + return ctx, fmt.Errorf("invalid piece index parsed from DetID, got:%s", msg.Prices[0].Prices[0].DetID) + } + if nextPieceIndex, found := isd.oracleKeeper.NextPieceIndexByFeederID(ctx, msg.FeederID); found { + if pieceIndex < nextPieceIndex { + return ctx, fmt.Errorf("piece index must be at least %d, got:%d", nextPieceIndex, pieceIndex) + } + if (ctx.IsCheckTx() && pieceIndex >= nextPieceIndex+uint32(isd.oracleKeeper.GetMaxNonceFromCache())) || + (!ctx.IsCheckTx() && pieceIndex > nextPieceIndex) { + return ctx, fmt.Errorf("invalid piece index, nextPieceIndex:%d, got:%d, isCheckTx:%t", nextPieceIndex, pieceIndex, ctx.IsCheckTx()) + } + } else { + return ctx, fmt.Errorf("no valid nextPieceIndex for feederID:%d", msg.FeederID) + } + + if accAddress, err := sdk.AccAddressFromBech32(msg.Creator); err != nil { + return ctx, errors.New("invalid address") + } else if _, err := isd.oracleKeeper.CheckAndIncreaseNextPieceIndex(ctx, sdk.ConsAddress(accAddress).String(), msg.FeederID, pieceIndex); err != nil { + return ctx, err + } + + return next(ctx, tx, simulate) + } + + for _, msg := range msgs { if accAddress, err := sdk.AccAddressFromBech32(msg.Creator); err != nil { return ctx, errors.New("invalid address") // #nosec G115 // safe conversion diff --git a/app/ante/cosmos/txsize_gas.go b/app/ante/cosmos/txsize_gas.go index d4993847f..b65495532 100644 --- a/app/ante/cosmos/txsize_gas.go +++ b/app/ante/cosmos/txsize_gas.go @@ -38,9 +38,12 @@ func (cgts ConsumeTxSizeGasDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, sim } // Skip gas consumption if tx is an OracleCreatePriceTx - if anteutils.IsOracleCreatePriceTx(tx) { - if len(ctx.TxBytes()) > anteutils.TxSizeLimit { - return ctx, sdkerrors.ErrTxTooLarge.Wrapf("oracle create-price tx has exceeds size limit, limit:%d, got:%d", anteutils.TxSizeLimit, len(ctx.TxBytes())) + if _, isOracle, isRawData := anteutils.OracleCreatePriceTx(tx); isOracle { + if isRawData && 
len(ctx.TxBytes()) > anteutils.TxSizeLimitOracleRawData { + return ctx, sdkerrors.ErrTxTooLarge.Wrapf("oracle create-price tx with rawdata exceeds size limit, limit:%d, got:%d", anteutils.TxSizeLimitOracleRawData, len(ctx.TxBytes())) + } + if len(ctx.TxBytes()) > anteutils.TxSizeLimitOraclePrice { + return ctx, sdkerrors.ErrTxTooLarge.Wrapf("oracle create-price tx exceeds size limit, limit:%d, got:%d", anteutils.TxSizeLimitOraclePrice, len(ctx.TxBytes())) + } return next(ctx, tx, simulate) } diff --git a/app/ante/evm/fee_market.go b/app/ante/evm/fee_market.go index 355b7e7af..7e0c4c33a 100644 --- a/app/ante/evm/fee_market.go +++ b/app/ante/evm/fee_market.go @@ -30,7 +30,7 @@ func NewGasWantedDecorator( } func (gwd GasWantedDecorator) AnteHandle(ctx sdk.Context, tx sdk.Tx, simulate bool, next sdk.AnteHandler) (newCtx sdk.Context, err error) { - if anteutils.IsOracleCreatePriceTx(tx) { + if _, ok, _ := anteutils.OracleCreatePriceTx(tx); ok { return next(ctx, tx, simulate) } evmParams := gwd.evmKeeper.GetParams(ctx) diff --git a/app/ante/handler_options.go b/app/ante/handler_options.go index 138e80744..6a2e6b1bf 100644 --- a/app/ante/handler_options.go +++ b/app/ante/handler_options.go @@ -120,6 +120,7 @@ func newCosmosAnteHandler(options HandlerOptions) sdk.AnteHandler { cosmosante.NewIncrementSequenceDecorator(options.AccountKeeper, options.OracleKeeper), ibcante.NewRedundantRelayDecorator(options.IBCKeeper), evmante.NewGasWantedDecorator(options.EvmKeeper, options.FeeMarketKeeper), + cosmosante.NewOracleTwoPhasesDecorator(options.OracleKeeper), ) } diff --git a/app/ante/utils/interfaces.go b/app/ante/utils/interfaces.go index 7cc45d21f..7cb6e8385 100644 --- a/app/ante/utils/interfaces.go +++ b/app/ante/utils/interfaces.go @@ -3,6 +3,7 @@ package utils import ( sdk "github.com/cosmos/cosmos-sdk/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + oracletypes "github.com/imua-xyz/imuachain/x/oracle/types" ) // BankKeeper defines the exposed interface
for using functionality of the bank keeper @@ -27,4 +28,9 @@ type StakingKeeper interface { // OracleKeeper defines the exposed interface for using functionality of the oracle keeper type OracleKeeper interface { CheckAndIncreaseNonce(ctx sdk.Context, validator string, feederID uint64, nonce uint32) (prevNonce uint32, err error) + NextPieceIndexByFeederID(ctx sdk.Context, feederID uint64) (uint32, bool) + CheckAndIncreaseNextPieceIndex(ctx sdk.Context, validator string, feederID uint64, NextPieceIndex uint32) (nextPieceIndex uint32, err error) + GetMaxNonceFromCache() int32 + GetPieceWithProof(msg *oracletypes.MsgCreatePrice) (*oracletypes.PieceWithProof, bool) + MinimalProofPathByIndex(feederID uint64, index uint32) []uint32 } diff --git a/app/ante/utils/oracle.go b/app/ante/utils/oracle.go index d54deb01d..e3c7ba3f7 100644 --- a/app/ante/utils/oracle.go +++ b/app/ante/utils/oracle.go @@ -6,17 +6,32 @@ import ( ) // TxSizeLimit limits max size of a create-price tx, this is calculated based on one nativeTokenbalance message of worst case(max size), which will need 576 bytes for balance update -const TxSizeLimit = 1000 +// 48*1024+5*32+6*4 +... 
TODO(leonz): ensure the proto cost, now use a fixed 200B +const ( + TxSizeLimitOraclePrice = 49600 + TxSizeLimitOracleRawData = 200 +) -func IsOracleCreatePriceTx(tx sdk.Tx) bool { +// TODO(leonz): return additional error info +func OracleCreatePriceTx(tx sdk.Tx) (msgsO []*oracletypes.MsgCreatePrice, validOracle, validRawData bool) { msgs := tx.GetMsgs() if len(msgs) == 0 { - return false + return nil, false, false } + msgsO = make([]*oracletypes.MsgCreatePrice, 0, len(msgs)) for _, msg := range msgs { - if _, ok := msg.(*oracletypes.MsgCreatePrice); !ok { - return false + msgO, ok := msg.(*oracletypes.MsgCreatePrice) + if !ok { + return nil, false, false + } + if msgO.IsPhaseTwo() { + if len(msgs) > 1 { + return nil, false, false + } + msgsO = append(msgsO, msgO) + return msgsO, true, true } + msgsO = append(msgsO, msgO) } - return true + return msgsO, true, false } diff --git a/app/app.go b/app/app.go index 3c265a63f..b61c133ed 100644 --- a/app/app.go +++ b/app/app.go @@ -105,7 +105,6 @@ import ( "github.com/cosmos/cosmos-sdk/store/streaming" storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/mempool" "github.com/cosmos/cosmos-sdk/types/module" "github.com/cosmos/cosmos-sdk/version" srvflags "github.com/evmos/evmos/v16/server/flags" @@ -354,7 +353,7 @@ type ImuachainApp struct { OperatorKeeper operatorKeeper.Keeper ImSlashKeeper slashKeeper.Keeper AVSManagerKeeper avsManagerKeeper.Keeper - OracleKeeper oracleKeeper.Keeper + OracleKeeper *oracleKeeper.Keeper ImmintKeeper immintkeeper.Keeper DistrKeeper distrkeeper.Keeper @@ -394,10 +393,10 @@ func NewImuachainApp( eip712.SetEncodingConfig(encodingConfig) + var oKeeper *oracleKeeper.Keeper // Setup Mempool and Proposal Handlers baseAppOptions = append(baseAppOptions, func(app *baseapp.BaseApp) { - // NOTE: we use a NoOpMempool here, for oracle create-price, it works fine since we have set a infinitgasmeterwithlimit in the ante handler 
to avoid the out-of-gas error no matter what the amount/gas is set by tx builder, and we set the highest priority for oracle create-price txs to work properly with tendermint mempool to make sure oracle creat-prie tx will be included in the mempool if received. And if we want to use some other application mempool, we need to take care of the gas limit and gas price in the oracle create-price txs.(we don't need to bother this since tendermint mempool use gasMeter.limit() instead of tx.Gas()) - mempool := mempool.NoOpMempool{} + mempool := NewImuaMempool(oKeeper, encodingConfig.TxConfig.TxDecoder()) app.SetMempool(mempool) handler := baseapp.NewDefaultProposalHandler(mempool, app) app.SetPrepareProposal(handler.PrepareProposalHandler()) @@ -462,6 +461,7 @@ func NewImuachainApp( keys: keys, tkeys: tkeys, memKeys: memKeys, + OracleKeeper: oKeeper, } // init params keeper and subspaces @@ -544,7 +544,7 @@ func NewImuachainApp( // asset and client chain registry. app.AssetsKeeper = assetsKeeper.NewKeeper( - keys[assetsTypes.StoreKey], appCodec, &app.OracleKeeper, app.AccountKeeper, + keys[assetsTypes.StoreKey], appCodec, app.OracleKeeper, app.AccountKeeper, app.BankKeeper, &app.DelegationKeeper, authAddrString, ) @@ -582,7 +582,7 @@ func NewImuachainApp( ) // x/oracle is not fully integrated (or enabled) but allows for exchange rates to be added. - app.OracleKeeper = oracleKeeper.NewKeeper( + *app.OracleKeeper = oracleKeeper.NewKeeper( appCodec, keys[oracleTypes.StoreKey], memKeys[oracleTypes.MemStoreKey], app.GetSubspace(oracleTypes.ModuleName), app.StakingKeeper, &app.DelegationKeeper, &app.AssetsKeeper, authAddrString, @@ -696,7 +696,7 @@ func NewImuachainApp( keys[operatorTypes.StoreKey], appCodec, app.AssetsKeeper, &app.DelegationKeeper, // intentionally a pointer, since not yet initialized. 
- &app.OracleKeeper, + app.OracleKeeper, &app.AVSManagerKeeper, &app.StakingKeeper, delegationTypes.VirtualSlashKeeper{}, @@ -904,7 +904,7 @@ func NewImuachainApp( reward.NewAppModule(appCodec, app.RewardKeeper), imslash.NewAppModule(appCodec, app.ImSlashKeeper), avs.NewAppModule(appCodec, app.AVSManagerKeeper), - oracle.NewAppModule(appCodec, app.OracleKeeper, app.AccountKeeper, app.BankKeeper), + oracle.NewAppModule(appCodec, *app.OracleKeeper, app.AccountKeeper, app.BankKeeper), distr.NewAppModule(appCodec, app.DistrKeeper), ) @@ -1106,6 +1106,7 @@ func NewImuachainApp( func (app *ImuachainApp) Name() string { return app.BaseApp.Name() } func (app *ImuachainApp) setAnteHandler(txConfig client.TxConfig, maxGasWanted uint64) { + app.OracleKeeper.GetPieceWithProof(nil) options := ante.HandlerOptions{ Cdc: app.appCodec, AccountKeeper: app.AccountKeeper, diff --git a/app/mempool.go b/app/mempool.go new file mode 100644 index 000000000..81bda7de0 --- /dev/null +++ b/app/mempool.go @@ -0,0 +1,280 @@ +package app + +import ( + "context" + "errors" + "slices" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/mempool" + + "github.com/imua-xyz/imuachain/app/ante/utils" + oraclekeeper "github.com/imua-xyz/imuachain/x/oracle/keeper" + oracletypes "github.com/imua-xyz/imuachain/x/oracle/types" +) + +type ImuaMempool struct { + // feederID -> []PieceWithProof, cached peiceWithProof for feederID + cachedPieces map[uint64]map[uint32][]*oracletypes.PieceWithProof + k *oraclekeeper.Keeper + count int + // load from config file as a config for mempool + //cacheWindow int + txDecoder sdk.TxDecoder +} + +// Insert inserts a tx into mempool, currently only used for rawData from tx related to 2-phases aggregation of oracle module +func (em *ImuaMempool) Insert(ctx context.Context, tx sdk.Tx) error { + // we don't filter tx not with message of rawData type, those tx will just be added into tendermint's txpool + if !em.includesMsgOracle(tx) { + return nil 
+ } + + piece, msgOracle, _, isRawData := em.GetPieceWithProof(tx) + // we don't filter tx not with message of rawData type + if !isRawData { + return nil + } + if piece == nil { + return errors.New("failed to parse pieceWithProof from tx with oracle rawData message") + } + fID := msgOracle.FeederID + + piecesCached, ok := em.cachedPieces[fID] + if !ok { + em.cachedPieces[fID] = make(map[uint32][]*oracletypes.PieceWithProof) + em.cachedPieces[fID][piece.Index] = []*oracletypes.PieceWithProof{piece} + em.count++ + return nil + } + + piecesIndexCached, ok := piecesCached[piece.Index] + if !ok { + piecesCached[piece.Index] = []*oracletypes.PieceWithProof{piece} + em.count++ + return nil + } + for _, pieceCached := range piecesIndexCached { + if pieceCached.EqualsTo(piece) { + return errors.New("piece exists in mempool") + } + } + piecesCached[piece.Index] = append(piecesIndexCached, piece) + em.count++ + return nil +} + +func (em *ImuaMempool) Select(ctx context.Context, txList [][]byte) mempool.Iterator { + // remove all expired tx, when Select for block 100, all txs belongs to 99 or before should be removed + + // feederIDS:[]uint64, which are expecting rawData + // []Tx, each feederID must have one tx + collectingFeederIDs := em.k.FeederManager.FeederIDsCollectingRawData() + if len(collectingFeederIDs) == 0 { + // remove all cached pieces since no collectingFeederIDs available + em.reset() + txDecodedList := make([]sdk.Tx, 0, len(txList)) + for _, txBytes := range txList { + tx, err := em.txDecoder(txBytes) + if err != nil { + continue + } + if _, _, isRawData := utils.OracleCreatePriceTx(tx); isRawData { + continue + } + txDecodedList = append(txDecodedList, tx) + } + return IteratorFromSlice(txDecodedList) + } + + em.clearExpiredFeederIDcache(collectingFeederIDs) + + seenFeederIDs := make(map[uint64]struct{}) + keep := make([]sdk.Tx, 0, len(txList)) + for _, txBytes := range txList { + tx, err := em.txDecoder(txBytes) + if err != nil { + keep = append(keep, tx) + 
continue + } + // TODO:(leonz) remove this + if !em.includesMsgOracle(tx) { + keep = append(keep, tx) + continue + } + msgs, _, isRawData := utils.OracleCreatePriceTx(tx) + if !isRawData { + // we don't need to check isOracle, it's already checked in anteHandler, and if some proposer filled in any invalid message it will be reject by anteHandler again in runTx + keep = append(keep, tx) + continue + } + msgOracle := msgs[0] + // check this before collectingFeederIds 'contain', since it's faster for map than slice + if _, ok := seenFeederIDs[msgOracle.FeederID]; ok { + // we only keep one tx for each feederID, we don't check if the piece index is expected, it's handled by anteHandler + continue + } + if !slices.Contains(collectingFeederIDs, msgOracle.FeederID) { + // the piece included in this tx is not expected, we just remove it + continue + } + keep = append(keep, tx) + seenFeederIDs[msgOracle.FeederID] = struct{}{} + } + // fill txs from imua-mempool for missed txs which is required by 'collectinFeederIDs' + if len(seenFeederIDs) < len(collectingFeederIDs) { + for _, expectedFeederID := range collectingFeederIDs { + if _, ok := seenFeederIDs[expectedFeederID]; ok { + continue + } + seenFeederIDs[expectedFeederID] = struct{}{} + nextPieceID, ok := em.k.NextPieceIndexByFeederID(sdk.UnwrapSDKContext(ctx), expectedFeederID) + if !ok { + // this should not happen since the 'expectedFeederID' is valid for collectinng rawData + continue + } + seenFeederIDs[expectedFeederID] = struct{}{} + if tx := em.getTxByFeederIDPieceIndex(expectedFeederID, nextPieceID); tx != nil { + keep = append(keep, tx) + } + } + } + if len(keep) > 0 { + return IteratorFromSlice(keep) + } + + return nil +} + +func (em *ImuaMempool) CountTx() int { + return em.count +} + +// Remove removes tx from mempool +func (em *ImuaMempool) Remove(tx sdk.Tx) error { + // TODO(leonz): clear history sealed round on block change + // remove all pieces with indexes <= piece index of input msg + piece, msgOracle, 
_, isRawData := em.GetPieceWithProof(tx) + // we don't process tx not with rawdata + if !isRawData { + return nil + } + piecesCached, ok := em.cachedPieces[msgOracle.FeederID] + if !ok { + return nil + } + newPiecesCached := make(map[uint32][]*oracletypes.PieceWithProof) + for index, piecesIndexCached := range piecesCached { + if index > piece.Index { + newPiecesCached[index] = piecesIndexCached + } else { + em.count -= len(piecesIndexCached) + } + } + if len(newPiecesCached) == 0 { + delete(em.cachedPieces, msgOracle.FeederID) + } else if len(newPiecesCached) < len(piecesCached) { + em.cachedPieces[msgOracle.FeederID] = newPiecesCached + } + return nil +} + +// includesMsgOracle returns true if tx includes MsgCreatePrice +func (em *ImuaMempool) includesMsgOracle(tx sdk.Tx) bool { + msgs := tx.GetMsgs() + for _, msg := range msgs { + if _, ok := msg.(*oracletypes.MsgCreatePrice); ok { + return true + } + } + return false +} + +// GetPieceWithProof returns pieceWithProof and msgCreatePrice from tx +func (em *ImuaMempool) GetPieceWithProof(tx sdk.Tx) (pieceWithProof *oracletypes.PieceWithProof, msgO *oracletypes.MsgCreatePrice, isOracle, isRawData bool) { + var msgs []*oracletypes.MsgCreatePrice + msgs, isOracle, isRawData = utils.OracleCreatePriceTx(tx) + if !isRawData { + return + } + msgO = msgs[0] + + pieceWithProof, _ = em.k.FeederManager.GetPieceWithProof(msgO) + if pieceWithProof != nil { + pieceWithProof.Tx = tx + } + return +} + +func (em *ImuaMempool) getTxByFeederIDPieceIndex(feederID uint64, pieceIndex uint32) sdk.Tx { + pwf, ok := em.cachedPieces[feederID] + if !ok { + return nil + } + piecesCached, ok := pwf[pieceIndex] + if !ok || len(piecesCached) == 0 { + return nil + } + + if len(piecesCached) > 1 { + // we got different tx for the same piece, there must be at least one invalid piece + // imua-mempool don't do the verify, we just pick the first one in cache and move it the the end of the list for that pieceIndex + // we just remove the picked to the 
list end instead of deleting it since that't the duty of 'Remove' + piecesCached = append(piecesCached[1:], piecesCached[0]) + } + // the first cached piece had been moved to the end + return piecesCached[len(piecesCached)-1].Tx +} + +func (em *ImuaMempool) reset() { + em.cachedPieces = make(map[uint64]map[uint32][]*oracletypes.PieceWithProof) + em.count = 0 +} + +func (em *ImuaMempool) clearExpiredFeederIDcache(collectingFeederIDs []uint64) { + feederIDs := make(map[uint64]struct{}) + for _, feederID := range collectingFeederIDs { + feederIDs[feederID] = struct{}{} + } + + for feederID := range em.cachedPieces { + if _, ok := feederIDs[feederID]; !ok { + delete(em.cachedPieces, feederID) + } + } +} + +func IteratorFromSlice(txList []sdk.Tx) *ImuaMemIterator { + // TODO:(leonz) implement me + return &ImuaMemIterator{txList: txList} + return nil +} + +type ImuaMemIterator struct { + txList []sdk.Tx + cursor int +} + +func (ii *ImuaMemIterator) Next() mempool.Iterator { + if ii.cursor >= len(ii.txList)-1 { + return nil + } + ii.cursor++ + return nil +} + +func (ii *ImuaMemIterator) Tx() sdk.Tx { + if ii.cursor < len(ii.txList) { + return ii.txList[ii.cursor] + } + return nil +} + +func NewImuaMempool(oKeeper *oraclekeeper.Keeper, decoder sdk.TxDecoder) *ImuaMempool { + return &ImuaMempool{ + cachedPieces: make(map[uint64]map[uint32][]*oracletypes.PieceWithProof), + k: oKeeper, + txDecoder: decoder, + } + +} diff --git a/app/proposal_handler.go b/app/proposal_handler.go new file mode 100644 index 000000000..4879f7a48 --- /dev/null +++ b/app/proposal_handler.go @@ -0,0 +1 @@ +package app diff --git a/client/docs/swagger-ui/swagger.json b/client/docs/swagger-ui/swagger.json index 81722390d..67285b217 100644 --- a/client/docs/swagger-ui/swagger.json +++ b/client/docs/swagger-ui/swagger.json @@ -18838,6 +18838,11 @@ "title": "slash_fraction_malicious defines the fraction one validator should be punished for malicious behavior" } } + }, + "piece_size_byte": { + "type": 
"integer", + "format": "int64", + "title": "piece_size_byte defines how many bytes one piece of raw data includes" } } } @@ -19580,6 +19585,11 @@ "title": "slash_fraction_malicious defines the fraction one validator should be punished for malicious behavior" } } + }, + "piece_size_byte": { + "type": "integer", + "format": "int64", + "title": "piece_size_byte defines how many bytes one piece of raw data includes" } }, "description": "Params defines the parameters for the module." @@ -19947,6 +19957,11 @@ "title": "slash_fraction_malicious defines the fraction one validator should be punished for malicious behavior" } } + }, + "piece_size_byte": { + "type": "integer", + "format": "int64", + "title": "piece_size_byte defines how many bytes one piece of raw data includes" } }, "description": "Params defines the parameters for the module." @@ -41690,6 +41705,11 @@ "title": "slash_fraction_malicious defines the fraction one validator should be punished for malicious behavior" } } + }, + "piece_size_byte": { + "type": "integer", + "format": "int64", + "title": "piece_size_byte defines how many bytes one piece of raw data includes" } }, "description": "Params defines the parameters for the module." @@ -42183,6 +42203,11 @@ "title": "slash_fraction_malicious defines the fraction one validator should be punished for malicious behavior" } } + }, + "piece_size_byte": { + "type": "integer", + "format": "int64", + "title": "piece_size_byte defines how many bytes one piece of raw data includes" } }, "description": "Params defines the parameters for the module." @@ -42665,6 +42690,11 @@ "title": "slash_fraction_malicious defines the fraction one validator should be punished for malicious behavior" } } + }, + "piece_size_byte": { + "type": "integer", + "format": "int64", + "title": "piece_size_byte defines how many bytes one piece of raw data includes" } }, "description": "Params defines the parameters for the module." 
@@ -42935,6 +42965,11 @@ "title": "slash_fraction_malicious defines the fraction one validator should be punished for malicious behavior" } } + }, + "piece_size_byte": { + "type": "integer", + "format": "int64", + "title": "piece_size_byte defines how many bytes one piece of raw data includes" } } } @@ -43484,6 +43519,11 @@ "title": "slash_fraction_malicious defines the fraction one validator should be punished for malicious behavior" } } + }, + "piece_size_byte": { + "type": "integer", + "format": "int64", + "title": "piece_size_byte defines how many bytes one piece of raw data includes" } }, "description": "Params defines the parameters for the module." diff --git a/proto/imuachain/oracle/v1/params.proto b/proto/imuachain/oracle/v1/params.proto index 3c9e67dcf..a7d088e32 100644 --- a/proto/imuachain/oracle/v1/params.proto +++ b/proto/imuachain/oracle/v1/params.proto @@ -37,6 +37,9 @@ message Params { // slashing defines the slashing related params SlashingParams slashing = 12; + + // piece_size_byte defines how many bytes one piece of raw data includes + uint32 piece_size_byte = 13; } // ConsensusMode defines the consensus mode for the prices. 
diff --git a/proto/imuachain/oracle/v1/rawdata_nst.proto b/proto/imuachain/oracle/v1/rawdata_nst.proto new file mode 100644 index 000000000..0fa60583e --- /dev/null +++ b/proto/imuachain/oracle/v1/rawdata_nst.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package imuachain.oracle.v1; + +option go_package = "github.com/imua-xyz/imuachain/x/oracle/types"; + +// NSTKV key-value pair to tell staker_index and its corresponding balance change +message NSTKV { + // staker index for a nst defined on imuachain side + uint32 staker_index = 1; + // balance change since last update + int64 balance_change = 2; +} + +// RawDataNST represents balance changes of all stakers for a NST +message RawDataNST { + // NSTKV use array to describe {staker_indx: balance_change} for all stakers whose balance had changed + repeated NSTKV nst_balance_changes = 1; +} diff --git a/proto/imuachain/oracle/v1/tx.proto b/proto/imuachain/oracle/v1/tx.proto index 9f16cd5a6..cd384d601 100644 --- a/proto/imuachain/oracle/v1/tx.proto +++ b/proto/imuachain/oracle/v1/tx.proto @@ -19,19 +19,33 @@ service Msg { rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse); } +// AggregationPhase defines the aggregation phase of the message +enum AggregationPhase{ + option (gogoproto.goproto_enum_prefix) = false; + // MESSAGE_PHASE_NON is the default value, which means the message is not of type 2-phases aggregation + AGGREGATION_PHASE_UNSPECIFIED = 0[(gogoproto.enumvalue_customname) = "AggregationPhaseUnspecified"]; + // MESSAGE_PHASE_ONE is the first phase of the 2-phases aggregation + AGGREGATION_PHASE_ONE = 1[(gogoproto.enumvalue_customname) = "AggregationPhaseOne"]; + // MESSAGE_PHASE_TWO is the second phase of the 2-phases aggregation + AGGREGATION_PHASE_TWO = 2[(gogoproto.enumvalue_customname) = "AggregationPhaseTwo"]; +} + + // MsgCreatePrice provide the price updating message message MsgCreatePrice { // creator tells which is the message sender and should sign this message string creator = 1 
[(cosmos_proto.scalar) = "cosmos.ValidatorAddressString"]; // refer to id from Params.TokenFeeders, 0 is reserved, invalid to use uint64 feeder_id = 2 [(gogoproto.customname) = "FeederID"]; - // prices price with its corresponding source repeated PriceSource prices = 3; // on which block commit does this message be built on uint64 based_block = 4; // nonce represents the unique number to disginguish duplicated messages int32 nonce = 5; + // true: this message includes data of {rawdata, proof} need to be verified based on consensused root + // false: this message includes data need to get consensus based on voting power + AggregationPhase phase = 6; } // MsgCreatePriceResponse diff --git a/x/assets/types/keys.go b/x/assets/types/keys.go index 6b72bd6f2..3e5107eb4 100644 --- a/x/assets/types/keys.go +++ b/x/assets/types/keys.go @@ -40,7 +40,7 @@ const ( prefixOperatorOptedInMiddlewareAssetInfo // unused prefixes for backward compatibility - // originally: KeyPrefixReStakerExoCoreAddr and Reverse + // originally: KeyPrefixReStakerImuachainAddr and Reverse _ _ diff --git a/x/oracle/keeper/abci.go b/x/oracle/keeper/abci.go new file mode 100644 index 000000000..dc5fc1a83 --- /dev/null +++ b/x/oracle/keeper/abci.go @@ -0,0 +1,24 @@ +package keeper + +import ( + abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/imua-xyz/imuachain/x/oracle/keeper/common" +) + +func (k Keeper) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { + if k.postHandlers == nil { + k.postHandlers = make(map[int64]common.PostAggregationHandler) + // bond handlers for custom pre defined token feeders + k.RegisterPostAggregation() + // bond handlers for nst token feeders + p := k.GetParams(ctx) + for tfID, tf := range p.TokenFeeders { + // #nosec G115 - safe conversion since tokenId is set from slice index + if p.IsNST(int(tf.TokenID)) { + k.BondPostAggregation(int64(tfID), UpdateNSTBalanceChange) + } + } + } + k.FeederManager.BeginBlock(ctx) 
+} diff --git a/x/oracle/keeper/common/two_phases.go b/x/oracle/keeper/common/two_phases.go new file mode 100644 index 000000000..60a2f9ff8 --- /dev/null +++ b/x/oracle/keeper/common/two_phases.go @@ -0,0 +1,8 @@ +package common + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// the input data could be either rawData bytes of data with big size for non-price senarios or 'price' info +type PostAggregationHandler func(data []byte, ctx sdk.Context, k KeeperOracle) error diff --git a/x/oracle/keeper/feedermanagement/caches.go b/x/oracle/keeper/feedermanagement/caches.go index 8c0c15669..f4d1e73c2 100644 --- a/x/oracle/keeper/feedermanagement/caches.go +++ b/x/oracle/keeper/feedermanagement/caches.go @@ -9,13 +9,12 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/ethereum/go-ethereum/common" + "github.com/imua-xyz/imuachain/x/oracle/types" oracletypes "github.com/imua-xyz/imuachain/x/oracle/types" ) type ItemV map[string]*big.Int -const v1RuleID = 1 - func (c *caches) CpyForSimulation() *caches { ret := *c msg := *(c.msg) @@ -87,10 +86,61 @@ func (c *caches) IsDeterministic(sourceID int64) (bool, error) { return sources[sourceID].Deterministic, nil } -// RuleV1, we restrict the source to be Chainlink and only that source is acceptable +// RuleV1: +// 1. single deterministic source (like chainlink) +// 2. 
2-phase aggregation with single deterministic source +// we don't verify the source-name to be 'chainlink' or 'beaconchain', it is satisefied as long as +// all validators aggreed on the same source func (c *caches) IsRuleV1(feederID int64) bool { - ruleID := c.params.params.TokenFeeders[feederID].RuleID - return ruleID == v1RuleID && len(c.params.params.Sources) == 2 && c.params.params.Sources[1].Name == oracletypes.SourceChainlinkName + p := c.params.params + // #nosec - G115 ruleID is assigned with slice index + ruleID := int(p.TokenFeeders[feederID].RuleID) + if ruleID == 0 || ruleID >= len(p.Rules) { + return false + } + + rule := p.Rules[ruleID] + if len(rule.SourceIDs) == 1 { + // #nosec G115 - sourceID is assigned with slice index + sID := int(rule.SourceIDs[0]) + if sID == 0 || sID >= len(p.Sources) { + return false + } + if s := p.Sources[sID]; s.Deterministic { + return true + } + } + + return c.isRule2PhasesByRule(rule) +} + +// TODO: forward this to cacheParams +// IsRule2Phases returns whether a tokenfeeder is restricted by 2-phases rule +func (c *caches) IsRule2PhasesByFeederID(feederID uint64) bool { + p := c.params.params + // #nosec G115 - ruleID is set from index of slice which is actually type of int + ruleID := int(p.TokenFeeders[feederID].RuleID) + if ruleID == 0 || ruleID >= len(p.Rules) { + return false + } + rule := p.Rules[ruleID] + return c.isRule2PhasesByRule(rule) +} + +func (c *caches) isRule2PhasesByRule(rule *types.RuleSource) bool { + // just check the format and don't care the verification here, the verification should be done by 'params' not in this memory calculator(feedermanager) + if len(rule.SourceIDs) == 1 && rule.SourceIDs[0] == 0 && + rule.Nom != nil && len(rule.Nom.SourceIDs) == 1 { + // #nosec G115 - ruleID is set from index of slice which is actually type of int + sID := int(rule.Nom.SourceIDs[0]) + if sID == 0 || sID >= len(c.params.params.Sources) { + return false + } + if s := c.params.params.Sources[sID]; 
s.Deterministic && rule.Nom.Minimum == 1 { + return true + } + } + return false } func (c *caches) GetTokenIDForFeederID(feederID int64) (int64, bool) { @@ -368,6 +418,15 @@ func (c *caches) Commit(ctx sdk.Context, reset bool) (msgUpdated, validatorsUpda return } +func (c *caches) RawDataPieceSize() uint32 { + return c.params.params.PieceSizeByte +} + +func (c *caches) IntervalForFeederID(feederID uint64) uint64 { + // TODO: change type of interval to uint32 + return c.params.params.TokenFeeders[feederID].Interval +} + func (c *caches) ResetCaches() { *c = *(newCaches()) } diff --git a/x/oracle/keeper/feedermanagement/feedermanager.go b/x/oracle/keeper/feedermanagement/feedermanager.go index 8c0cfeecb..672ca142f 100644 --- a/x/oracle/keeper/feedermanagement/feedermanager.go +++ b/x/oracle/keeper/feedermanagement/feedermanager.go @@ -1,6 +1,7 @@ package feedermanagement import ( + "encoding/hex" "errors" "fmt" "math/big" @@ -530,7 +531,8 @@ func (f *FeederManager) updateRoundsParamsAndAddNewRounds(ctx sdk.Context) { if _, ok := existsFeederIDs[feederID]; !ok && (tokenFeeder.EndBlock == 0 || tokenFeeder.EndBlock > uint64(height)) { logger.Info("[mem] add new round", "feederID", feederID, "height", height) f.sortedFeederIDs = append(f.sortedFeederIDs, feederID) - f.rounds[feederID] = newRound(feederID, tokenFeeder, int64(params.MaxNonce), f.cs, NewAggMedian()) + twoPhases := f.cs.IsRule2PhasesByFeederID(uint64(feederID)) + f.rounds[feederID] = newRound(feederID, tokenFeeder, int64(params.MaxNonce), f.cs, NewAggMedian(), twoPhases) } } f.sortedFeederIDs.sort() @@ -603,6 +605,7 @@ func (f *FeederManager) SetForceSeal() { } func (f *FeederManager) ValidateMsg(msg *oracletypes.MsgCreatePrice) error { + // TODO:(leonz) ? this validation is not suitable for validateBasic, it need state information, but maybe move them into anteHandler ? 
// nonce, feederID, creator has been verified by anteHandler // baseBlock is going to be verified by its corresponding round decimal, err := f.cs.GetDecimalFromFeederID(msg.FeederID) @@ -659,6 +662,37 @@ func (f *FeederManager) ValidateMsg(msg *oracletypes.MsgCreatePrice) error { } } } + + // extra check for message as 1st phase for 2-phases aggregation + if msg.IsPhaseOne() { + if len(msg.Prices) != 1 { + return errors.New("2-phases aggregation should have exactly one source") + } + if len(msg.Prices[0].Prices) != 1 { + return errors.New("2-phases aggregation should have exactly one price") + } + lPrice := len(msg.Prices[0].Prices[0].Price) + if lPrice == 0 || lPrice > int(f.cs.RawDataPieceSize()) { + return fmt.Errorf("2-phases aggregation should have exactly one price with length between 1 and %d", f.cs.RawDataPieceSize()) + } + + // detID is used to tell how many pieces the raw data is divided into + leafCountStr := msg.Prices[0].Prices[0].DetID + if len(leafCountStr) == 0 { + return errors.New("2-phases aggregation should have detID to tell how many pieces the raw data is divided into") + } + leafCount, err := strconv.ParseUint(leafCountStr, 10, 32) + if err != nil { + return fmt.Errorf("2-phases aggregation detID should be a valid uint32, got:%s", leafCountStr) + } + + // we wait one more maxNonce blocks to make sure proposer getting expected txs in their mempool + // #nosec G115 // maxNonce is positive + windowForPhaseTwo := f.cs.IntervalForFeederID(msg.FeederID) - uint64(f.cs.GetMaxNonce())*2 + if leafCount > windowForPhaseTwo { + return fmt.Errorf("2-phases aggregation for feederID:%d, should have detID less than or equal to %d", msg.FeederID, windowForPhaseTwo) + } + } return nil } @@ -669,6 +703,7 @@ func (f *FeederManager) ProcessQuote(ctx sdk.Context, msg *oracletypes.MsgCreate if err := f.ValidateMsg(msg); err != nil { return nil, oracletypes.ErrInvalidMsg.Wrap(err.Error()) } + msgItem := getProtoMsgItemFromQuote(msg) // #nosec G115 // feederID is 
index of slice @@ -804,7 +839,8 @@ func (f *FeederManager) recovery(ctx sdk.Context) (bool, error) { continue } tfID := int64(tfID) - f.rounds[tfID] = newRound(tfID, tf, int64(params.MaxNonce), f.cs, NewAggMedian()) + twoPhases := f.cs.IsRule2PhasesByFeederID(uint64(tfID)) + f.rounds[tfID] = newRound(tfID, tf, int64(params.MaxNonce), f.cs, NewAggMedian(), twoPhases) f.sortedFeederIDs.add(tfID) } f.prepareRounds(ctxReplay) @@ -845,6 +881,33 @@ func (f *FeederManager) recovery(ctx sdk.Context) (bool, error) { return true, nil } +func (f *FeederManager) RoundIDToBaseBlock(feederID, roundID uint64) (uint64, bool) { + r, ok := f.rounds[int64(feederID)] + if !ok { + return 0, false + } + return r.baseBlockFromRoundID(roundID) +} + +// BaseBlockToRoundID returns the roundID which the input baseblock indicates to, it is different to the roundID of which this baseBlock BelongsTo (+1) +func (f *FeederManager) BaseBlockToNextRoundID(feederID, baseBlock uint64) (uint64, bool) { + //TODO(leonz): use uint64 as f.rounds key + // #nosec G115 + r, ok := f.rounds[int64(feederID)] + if !ok { + return 0, false + } + // TODO(leonz): use uint64 for getPosition + // #nosec G115 + b, rID, _, _ := r.getPosition(int64(baseBlock)) + // #nosec G115 + if uint64(b) != baseBlock { + return 0, false + } + // #nosec G115 + return uint64(rID), true +} + func (f *FeederManager) Equals(fm *FeederManager) bool { if f == nil || fm == nil { return f == fm } @@ -932,3 +995,35 @@ func getProtoMsgItemFromQuote(msg *oracletypes.MsgCreatePrice) *oracletypes.MsgItem { PSources: msg.Prices, } } + +func (f *FeederManager) ProcessRawData(msg *oracletypes.MsgCreatePrice) error { + piece, ok := f.GetPieceWithProof(msg) + if !ok { + return errors.New("failed to parse rawdata piece from message") + } + // #nosec G115 + r, ok := f.rounds[int64(msg.FeederID)] + if !ok { + // this should not happen + return fmt.Errorf("round for feederID:%d not exists", msg.FeederID) + } + if r.m == nil { + return fmt.Errorf("feederID %d is 
not collecting rawData", msg.FeederID) + } + // we don't check the 1st return value to see if this input proof is of the minimal, that's the duty of anteHandler, and 'verified' pieceWithProof will not fail the tx execution + _, ok = r.m.VerifyAndCache(piece.Index, piece.RawData, piece.Proof) + if !ok { + return fmt.Errorf("failed to verify piece of index %d provided within message for feederID:%d against root:%s", piece.Index, msg.FeederID, hex.EncodeToString(r.m.RootHash())) + } + // we don't do no state update in tx exexuting, the postHandler and all state update will be handled in EndBlock + // // post handle rawData registered for the feederID + // // clear all caching pieces from stateDB + // + // // remove/reset merkleTree + // // remove merkleTree + // persist piece for recovery (with memory-cache update into merkleTree) + // save this piece and proof to db for recovery, for nodes without running, + // this process only causes additional: two write to stateDB(piece, proof), one read from the stateDB(piece) + + return nil +} diff --git a/x/oracle/keeper/feedermanagement/feedermanager_two_phases.go b/x/oracle/keeper/feedermanagement/feedermanager_two_phases.go new file mode 100644 index 000000000..89913b91f --- /dev/null +++ b/x/oracle/keeper/feedermanagement/feedermanager_two_phases.go @@ -0,0 +1,140 @@ +package feedermanagement + +import ( + "encoding/base64" + "sort" + "strconv" + "strings" + + "github.com/ethereum/go-ethereum/common" + oracletypes "github.com/imua-xyz/imuachain/x/oracle/types" +) + +func (f *FeederManager) NextPieceIndexByFeederID(feederID uint64) (uint32, bool) { + r, ok := f.rounds[int64(feederID)] + if !ok || r.m == nil { + return 0, false + } + return r.m.NextPieceIndex() +} + +func (f *FeederManager) MaxPieceIndexForTokenFeederID(feederID uint64) (uint32, bool) { + r, ok := f.rounds[int64(feederID)] + if !ok { + return 0, false + } + if r.m == nil || r.m.LeafCount() < 1 { + return 0, false + } + return r.m.LeafCount() - 1, true +} + 
+// VerifyPieceProofsForTokenFeederID verifies targetPiece against feederID's corresponding merkle root, it might need proof nodes from pieces +// return (rootHash, verified) +func (f *FeederManager) VerifyPieceProofsForTokenFeederID(feederID uint64, targetPiece *oracletypes.PieceWithProof) ([]byte, bool) { + r, ok := f.rounds[int64(feederID)] + if !ok { + return nil, false + } + if r.m == nil || r.m.Completed() { + return nil, false + } + // the proof has been verified to be minimal in anteHandler so we don't need to check the first returned value + _, verified := r.m.VerifyAndCache(targetPiece.Index, targetPiece.RawData, targetPiece.Proof) + return r.m.RootHash(), verified +} + +// GetPieceWithProof verify the message is a valid rawData message and parse the piece of rawData +func (f *FeederManager) GetPieceWithProof(msg *oracletypes.MsgCreatePrice) (*oracletypes.PieceWithProof, bool) { + if !msg.IsPhaseTwo() { + return nil, false + } + if !f.cs.IsRule2PhasesByFeederID(msg.FeederID) { + return nil, false + } + + r := f.rounds[int64(msg.FeederID)] + if r == nil { + return nil, false + } + pieceCount := r.PieceCount() + + if len(msg.Prices) != 1 || len(msg.Prices[0].Prices) < 1 || len(msg.Prices[0].Prices) > 2 { + return nil, false + } + + pieceStr := msg.Prices[0].Prices[0].Price + if len(pieceStr) == 0 || len(pieceStr) > int(f.cs.RawDataPieceSize()) { + return nil, false + } + + tmp, err := strconv.ParseUint(msg.Prices[0].Prices[0].DetID, 10, 32) + pieceIndex := uint32(tmp) + if err != nil || pieceIndex > pieceCount || pieceIndex < 1 { + return nil, false + } + + ret := &oracletypes.PieceWithProof{ + Index: pieceIndex, + RawData: []byte(pieceStr), + } + + if len(msg.Prices[0].Prices) == 2 { + joinedHashesBase64 := msg.Prices[0].Prices[1].Price + joinedIndexes := msg.Prices[0].Prices[1].DetID + if len(joinedHashesBase64) == 0 || len(joinedIndexes) == 0 { + return nil, false + } + + hashesBase64 := strings.Split(joinedHashesBase64, 
oracletypes.DelimiterForBase64) + indexes := strings.Split(joinedIndexes, oracletypes.DelimiterForBase64) + + if len(hashesBase64) == 0 || len(hashesBase64) != len(indexes) { + return nil, false + } + + proof := make([]*oracletypes.HashNode, 0, len(hashesBase64)) + for i, hashBase64 := range hashesBase64 { + hashBytes, err := base64.StdEncoding.DecodeString(hashBase64) + if err != nil || len(hashBytes) != common.HashLength { + return nil, false + } + tmp, err := strconv.ParseUint(indexes[i], 10, 32) + index := uint32(tmp) + if err != nil || index < 1 || index > pieceCount { + return nil, false + } + proof = append(proof, &oracletypes.HashNode{Index: index, Hash: hashBytes}) + } + ret.Proof = proof + } + return ret, true +} + +// GetTidyProofPathByIndex return the proof path with unseen nodes that under condition pieces comes as index from 0 to n, and cached all seen proof nodes +func (f *FeederManager) MinimalProofPathByIndex(feederID uint64, index uint32) []uint32 { + r, ok := f.rounds[int64(feederID)] + if !ok { + return nil + } + if r.m == nil { + return nil + } + return r.m.MinimalProofPathByIndex(index) +} + +// FeederIDsCollectingRawData returns the list of feederIDs that are currently collecting raw data +// the list is sorted in ascending order +func (f *FeederManager) FeederIDsCollectingRawData() []uint64 { + // TODO(leonz): implement me + ret := make([]uint64, 0) + for feederID, r := range f.rounds { + if r.m != nil && !r.m.Completed() { + ret = append(ret, uint64(feederID)) + } + } + sort.Slice(ret, func(i, j int) bool { + return ret[i] < ret[j] + }) + return ret +} diff --git a/x/oracle/keeper/feedermanagement/helper_test.go b/x/oracle/keeper/feedermanagement/helper_test.go index 0a536743e..4f70b4611 100644 --- a/x/oracle/keeper/feedermanagement/helper_test.go +++ b/x/oracle/keeper/feedermanagement/helper_test.go @@ -98,12 +98,12 @@ func (t *Test) NewAggregator(filled bool) *aggregator { func (t *Test) NewRound(cs CacheReader) *round { feederID := 
r.Intn(len(params.TokenFeeders)-1) + 1 - round := newRound(int64(feederID), params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian) + round := newRound(int64(feederID), params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian, false) return round } func (t *Test) NewRoundWithFeederID(cs CacheReader, feederID int64) *round { - round := newRound(feederID, params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian) + round := newRound(feederID, params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian, false) return round } diff --git a/x/oracle/keeper/feedermanagement/round.go b/x/oracle/keeper/feedermanagement/round.go index cf5f0c53d..d2b5384ea 100644 --- a/x/oracle/keeper/feedermanagement/round.go +++ b/x/oracle/keeper/feedermanagement/round.go @@ -6,8 +6,8 @@ import ( oracletypes "github.com/imua-xyz/imuachain/x/oracle/types" ) -func newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowSize int64, cache CacheReader, algo AggAlgorithm) *round { - return &round{ +func newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowSize int64, cache CacheReader, algo AggAlgorithm, twoPhases bool) *round { + ret := &round{ // #nosec G115 startBaseBlock: int64(tokenFeeder.StartBaseBlock), // #nosec G115 @@ -28,7 +28,12 @@ func newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowS roundBaseBlock: 0, roundID: 0, algo: algo, + twoPhases: twoPhases, } + if twoPhases { + ret.rawData = make([][]byte, 0) + } + return ret } func (r *round) Equals(r2 *round) bool { @@ -183,8 +188,8 @@ func (r *round) IsQuotingWindowOpen() bool { return r.a != nil } -func (r *round) IsQuotingWindowEnd(currentHeight int64) bool { - _, _, delta, _ := r.getPosition(currentHeight) +func (r *round) IsQuotingWindowEnd(height int64) bool { + _, _, delta, _ := r.getPosition(height) return delta == r.quoteWindowSize } @@ -264,3 +269,22 @@ func (r *round) 
getPosition(currentHeight int64) (baseBlock, roundID, delta int6 baseBlock = currentHeight - delta return } + +func (r *round) baseBlockFromRoundID(roundID uint64) (uint64, bool) { + if roundID < uint64(r.startRoundID) { + return 0, false + } + ret := (roundID-uint64(r.startRoundID))*uint64(r.interval) + uint64(r.startBaseBlock) + if r.endBlock > 0 && ret > uint64(r.endBlock) { + return 0, false + } + return ret, true +} + +func (r *round) PieceCount() uint32 { + if r.m == nil { + return 0 + } + + return r.m.LeafCount() +} diff --git a/x/oracle/keeper/feedermanagement/types.go b/x/oracle/keeper/feedermanagement/types.go index 759a3d58c..0c9b992b8 100644 --- a/x/oracle/keeper/feedermanagement/types.go +++ b/x/oracle/keeper/feedermanagement/types.go @@ -221,6 +221,18 @@ type round struct { cache CacheReader // algo is the aggregation algorithm for current round to get final price algo AggAlgorithm + + // twoPhases indicates if the corresponding tokenfeeder requires 2-phase aggregation + twoPhases bool + // rawData is original data for tokenFeeder with 2-phases aggregation rule + // a validator can provide more than one rawData for one round + rawData [][]byte + // in 2-phases aggregation, the aggregated price is the hash root of pieces of rawData, when we received every piece to recover the whole original rawData, this flag is set to true + rawDataSealed bool + + m *oracletypes.MerkleTree + + h common.PostAggregationHandler } type orderedSliceInt64 []int64 diff --git a/x/oracle/keeper/keeper.go b/x/oracle/keeper/keeper.go index 6f98e6c35..828c73924 100644 --- a/x/oracle/keeper/keeper.go +++ b/x/oracle/keeper/keeper.go @@ -3,8 +3,6 @@ package keeper import ( "fmt" - sdkmath "cosmossdk.io/math" - "github.com/cometbft/cometbft/libs/log" "github.com/cosmos/cosmos-sdk/codec" storetypes "github.com/cosmos/cosmos-sdk/store/types" @@ -28,6 +26,7 @@ type ( assetsKeeper types.AssetsKeeper types.SlashingKeeper *feedermanagement.FeederManager + postHandlers 
map[int64]common.PostAggregationHandler } ) @@ -63,19 +62,12 @@ func NewKeeper( assetsKeeper: assetsKeeper, authority: authority, SlashingKeeper: slashingKeeper, - // fm: feedermanagement.NewFeederManager(nil), - FeederManager: feedermanagement.NewFeederManager(nil), + FeederManager: feedermanagement.NewFeederManager(nil), } - ret.SetKeeper(ret) + ret.FeederManager.SetKeeper(&ret) return ret } func (k Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) } - -// UpdateNativeTokenValidatorInfo it's used to fix the issue of missing interface. -// it will be removed when merging with the oracle PR. -func (k Keeper) UpdateNativeTokenValidatorInfo(_ sdk.Context, _, _, _ string, _ sdkmath.Int) error { - return nil -} diff --git a/x/oracle/keeper/msg_server_price_feed.go b/x/oracle/keeper/msg_server_price_feed.go index 7dc48d131..69c94a20d 100644 --- a/x/oracle/keeper/msg_server_price_feed.go +++ b/x/oracle/keeper/msg_server_price_feed.go @@ -39,7 +39,19 @@ func (ms msgServer) CreatePrice(goCtx context.Context, msg *types.MsgCreatePrice return nil, types.ErrPriceProposalFormatInvalid.Wrap(err.Error()) } - // core logic and functionality of Price Aggregation + // goto rawData process which needs no 'aggregation', we just verify the provided piece with recorded root which got consensus + if msg.IsPhaseTwo() { + err := ms.ProcessRawData(msg) + if err != nil { + logger.Error("quote of 2nd-phase for rawData failed", append(logQuote, "error", err)) + return nil, err + } + return &types.MsgCreatePriceResponse{}, nil + } + + // core logic and functionality of Price Aggregation for 1st phase including + // - price data + // - hash for big data finalPrice, err := ms.ProcessQuote(ctx, msg, ctx.IsCheckTx()) if err != nil { if sdkerrors.IsOf(err, types.ErrQuoteRecorded) { diff --git a/x/oracle/keeper/nonce.go b/x/oracle/keeper/nonce.go index 7c36a0cba..351af6d1e 100644 --- a/x/oracle/keeper/nonce.go +++ 
b/x/oracle/keeper/nonce.go @@ -26,8 +26,10 @@ func (k Keeper) RemoveNonceWithValidator(ctx sdk.Context, validator string) { k.removeNonceWithValidator(store, validator) } +// CheckAndIncreaseNonce check and increase the nonce for a specific validator and feederID +// valid nonce starts from 1 func (k Keeper) CheckAndIncreaseNonce(ctx sdk.Context, validator string, feederID uint64, nonce uint32) (prevNonce uint32, err error) { - maxNonce := k.GetMaxNonceFromCache() + maxNonce := k.FeederManager.GetMaxNonceFromCache() // #nosec G115 // safe conversion if nonce > uint32(maxNonce) { return 0, fmt.Errorf("nonce_check_failed: max_exceeded: limit=%d received=%d", maxNonce, nonce) diff --git a/x/oracle/keeper/native_token.go b/x/oracle/keeper/nst_post_aggregation.go similarity index 98% rename from x/oracle/keeper/native_token.go rename to x/oracle/keeper/nst_post_aggregation.go index 01e7440ef..9660297b4 100644 --- a/x/oracle/keeper/native_token.go +++ b/x/oracle/keeper/nst_post_aggregation.go @@ -13,6 +13,7 @@ import ( "github.com/ethereum/go-ethereum/common/hexutil" utils "github.com/imua-xyz/imuachain/utils" assetstypes "github.com/imua-xyz/imuachain/x/assets/types" + "github.com/imua-xyz/imuachain/x/oracle/keeper/common" "github.com/imua-xyz/imuachain/x/oracle/types" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -429,3 +430,8 @@ func getNSTVersionFromDetID(detID string) (int64, error) { } return nstVersion, nil } + +// UpdateNSTBalanceChange serves the post handling for nst balance change +func UpdateNSTBalanceChange(rawData []byte, ctx sdk.Context, k common.KeeperOracle) error { + return nil +} diff --git a/x/oracle/keeper/post_aggregation.go b/x/oracle/keeper/post_aggregation.go new file mode 100644 index 000000000..2fd13edf2 --- /dev/null +++ b/x/oracle/keeper/post_aggregation.go @@ -0,0 +1,21 @@ +package keeper + +import "github.com/imua-xyz/imuachain/x/oracle/keeper/common" + +// RegisterPostAggregation registers handler for tokenfeeder set 
with deterministic source which need to do some process with the deterministic aggregated result +// this is used to register the post handlers served for some customer defined deterministic source oracle requirement +func (k *Keeper) RegisterPostAggregation() { + // k.BondPostAggregation(1, UpdateNSTBalanceChange) +} + +func (k *Keeper) BondPostAggregation(feederID int64, postHandler common.PostAggregationHandler) { + k.postHandlers[feederID] = postHandler +} + +func (k *Keeper) GetPostAggregation(feederID int64) (handler common.PostAggregationHandler, found bool) { + if k.postHandlers == nil { + return nil, false + } + handler, found = k.postHandlers[feederID] + return +} diff --git a/x/oracle/keeper/two_phases.go b/x/oracle/keeper/two_phases.go new file mode 100644 index 000000000..dbfc86255 --- /dev/null +++ b/x/oracle/keeper/two_phases.go @@ -0,0 +1,93 @@ +package keeper + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/imua-xyz/imuachain/x/oracle/types" +) + +// SetNextPieceIndex sets the next-piece-index of feederID for 'node-recovery' +func (k Keeper) SetNextPieceIndex(ctx sdk.Context, feederID uint64, pieceIndex uint32) { + store := ctx.KVStore(k.storeKey) + key := types.TwoPhasesFeederKey(feederID) + store.Set(key, types.Uint32Bytes(pieceIndex)) +} + +func (k Keeper) ClearNextPieceIndex(ctx sdk.Context, feederID uint64) { + store := ctx.KVStore(k.storeKey) + key := types.TwoPhasesFeederKey(feederID) + store.Delete(key) +} + +// NextPieceIndexByFeederID read directly from memory and return the next-piece-index of input feederID +func (k Keeper) NextPieceIndexByFeederID(ctx sdk.Context, feederID uint64) (uint32, bool) { + return k.FeederManager.NextPieceIndexByFeederID(feederID) +} + +// CheckAndIncreasePieceIndex checks and increase the 'nextPieceIndex' of a specific validator and feederID +// valid pieceIndex starts from 0 +// returns (nextPieceIndexAfterIncreased, error) +func (k Keeper) CheckAndIncreaseNextPieceIndex(ctx 
sdk.Context, validator string, feederID uint64, nextPieceIndex uint32) (uint32, error) { + maxPieceIndex, ok := k.FeederManager.MaxPieceIndexForTokenFeederID(feederID) + if !ok { + return 0, fmt.Errorf("max piece index not found for feederID: %d", feederID) + } + if nextPieceIndex > maxPieceIndex { + return 0, fmt.Errorf("piece_index_check_failed: feederID:%d, max_piece_index:%d, got:%d", feederID, maxPieceIndex, nextPieceIndex) + } + store := ctx.KVStore(k.storeKey) + key := types.TwoPhasesValidatorPieceKey(validator, feederID) + bz := store.Get(key) + if bz == nil { + return 0, fmt.Errorf("piece_index_check_failed: validator_not_found: validator:%s, feeder_id:%d", validator, feederID) + } + expectedPieceIndex := types.BytesToUint32(bz) + if nextPieceIndex != expectedPieceIndex { + return 0, fmt.Errorf("piece_index_check_failed: non_conseecutive: expected:%d, recived:%d", expectedPieceIndex, nextPieceIndex) + } + store.Set(key, types.Uint32Bytes(nextPieceIndex+1)) + return nextPieceIndex + 1, nil +} + +func (k Keeper) Setup2ndPhaseNextPieceIndex(ctx sdk.Context, feederID uint64, validators []string) { + store := ctx.KVStore(k.storeKey) + // 1. set nextPieceIndex for feederID, first piece index is 0 + store.Set(types.TwoPhasesFeederKey(feederID), types.Uint32Bytes(0)) + + // 2. set nextPieceIndex for all activeValidators, fisr piece index is 0 + for _, validator := range validators { + store.Set(types.TwoPhasesValidatorPieceKey(validator, feederID), types.Uint32Bytes(0)) + } +} + +func (k Keeper) Clear2ndPhaseNextPieceIndex(ctx sdk.Context, feederID uint64, validators []string) { + // TODO(leonz): implement me + // 1. remove feederID->nextPieceIndex, 2. remove validator/feederID->nextPieceIndex + store := ctx.KVStore(k.storeKey) + store.Delete(types.TwoPhasesFeederKey(feederID)) + // 2. 
remove nextPieceIndex for validators + for _, validator := range validators { + store.Delete(types.TwoPhasesValidatorPieceKey(validator, feederID)) + } + +} + +// set feederID/pieceIndex -> rawData(]byte) + +// read feederID/pieceIndex -> rawData([]byte) + +// read all pieces of rawData from feederID/ + +// clear all rawData for feederID/ + +// update feederID -> proof([][]byte) + +// clear the whole proof under feederID + +// clear feederID/, clear: +// 1. rawData +// 2. proof +func (k Keeper) Clear2ndPhases(ctx sdk.Context, feederID uint64) { + +} diff --git a/x/oracle/module_beginblock.go b/x/oracle/module_beginblock.go index c010b6669..f4559c527 100644 --- a/x/oracle/module_beginblock.go +++ b/x/oracle/module_beginblock.go @@ -8,6 +8,6 @@ import ( ) // BeginBlock contains the logic that is automatically triggered at the beginning of each block -func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { - am.keeper.BeginBlock(ctx) +func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { + am.keeper.BeginBlock(ctx, req) } diff --git a/x/oracle/types/codec.go b/x/oracle/types/codec.go index 0c846a3e4..6485b3642 100644 --- a/x/oracle/types/codec.go +++ b/x/oracle/types/codec.go @@ -14,11 +14,13 @@ func RegisterCodec(cdc *codec.LegacyAmino) { } func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + // TODO: remove this registry.RegisterImplementations((*sdk.Msg)(nil), &MsgCreatePrice{}, ) // this line is used by starport scaffolding # 3 + // this method registered sdk.Msg, so we don't need the above one msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) } diff --git a/x/oracle/types/key_two_phase.go b/x/oracle/types/key_two_phase.go new file mode 100644 index 000000000..1650f4ee9 --- /dev/null +++ b/x/oracle/types/key_two_phase.go @@ -0,0 +1,35 @@ +package types + +const ( + TwoPhasesPrefix = "TwoPhases/" + FeederPrefix = TwoPhasesPrefix + "feeder/" + ValidatorPrefix = TwoPhasesPrefix + "validator/" +) + 
+func TwoPhasesKeyPrefix() []byte { + return []byte(TwoPhasesPrefix) +} + +func TwoPhasesFeederKeyPrefix() []byte { + return []byte(FeederPrefix) +} + +func TwoPhasesValidatorKeyPrefix() []byte { + return []byte(ValidatorPrefix) +} + +func TwoPhasesFeederKey(feederID uint64) []byte { + var key []byte + key = append(key, FeederPrefix...) + key = append(key, Uint64Bytes(feederID)...) + return key +} + +func TwoPhasesValidatorPieceKey(validator string, feederID uint64) []byte { + var key []byte + key = append(key, ValidatorPrefix...) + key = append(key, []byte(validator)...) + key = append(key, DelimiterForCombinedKey) + key = append(key, Uint64Bytes(feederID)...) + return key +} diff --git a/x/oracle/types/merkletree.go b/x/oracle/types/merkletree.go new file mode 100644 index 000000000..1325533a7 --- /dev/null +++ b/x/oracle/types/merkletree.go @@ -0,0 +1,384 @@ +package types + +import ( + "bytes" + "crypto/sha256" +) + +// node represents leaf-node +type Node struct { + // hash *common.Hash + // use []byte instead of common.Hash for conveniences for appending + hash []byte + index uint32 + parent *Node + // left sibling + left *Node + // right sibling + right *Node +} +type MerkleTree struct { + root []byte + rootIndex uint32 + t map[uint32]*Node + pieces [][]byte + rawData []byte + leafCount uint32 + // size of bytes + pieceSize uint32 + minimalProofPath map[uint32][]uint32 +} + +// ordered bottom up +type Proof []*HashNode + +func (p *Proof) getHashByIndex(index uint32) []byte { + for _, hn := range *p { + if hn.Index == index { + return hn.Hash + } + } + return nil +} + +// hashNode represents a node including index and hash +type HashNode struct { + Index uint32 + Hash []byte +} + +// func (m *MerkleTree) getPathFromLeafIndex(index uint32) []uint32 { +func (m *MerkleTree) ProofPathFromLeafIndex(index uint32) []uint32 { + if index >= m.leafCount { + return nil + } + node, ok := m.t[index] + if !ok { + panic("merkle is not initialized correctly") + } + path := 
make([]uint32, 0) + for node != nil { + if node.left == nil && node.right == nil { + break + } + if node.left != nil { + path = append(path, node.left.index) + } else { + path = append(path, node.right.index) + } + node = node.parent + } + return path +} + +func (m *MerkleTree) UncachedProofPathFromLeafIndex(index uint32) []uint32 { + if index >= m.leafCount { + return nil + } + node, ok := m.t[index] + if !ok { + panic("merkle is not initialized correctly") + } + path := make([]uint32, 0) + for node != nil { + if node.left == nil && node.right == nil { + break + } + if node.left != nil { + if m.t[node.left.index].hash == nil { + path = append(path, node.left.index) + } + } else { + if m.t[node.right.index].hash == nil { + path = append(path, node.right.index) + } + } + node = node.parent + } + return path +} + +func (m *MerkleTree) VerifyAndCache(targetIndex uint32, targetPiece []byte, proof Proof) (cachedProof Proof, verified bool) { + if targetIndex >= m.leafCount { + return nil, false + } + tmpHash := sha256.Sum256(targetPiece) + hash := tmpHash[:] + // get hashed-leafnode + node := m.t[targetIndex] + if node.hash != nil { + return nil, bytes.Equal(node.hash, hash) + } + ret := make([]*HashNode, 0, len(proof)) + ret = append(ret, &HashNode{Index: node.index, Hash: hash}) + newNode := &Node{ + // tmp cache the unverified hash in a new node + hash: hash, + index: node.index, + parent: node.parent, + // left sibling + left: node.left, + // right sibling + right: node.right, + } + tmpNode := newNode + // only root have no sibling, and root must have hash + for tmpNode.right != nil || tmpNode.left != nil { + var parentHash []byte + var pairHash []byte + var pairNode *Node + var combinednodes []byte + + if tmpNode.right != nil { + pairNode = tmpNode.right + } else { + pairNode = tmpNode.left + } + + if pairHash = pairNode.hash; pairHash == nil { + pairHash = proof.getHashByIndex(pairNode.index) + if len(pairHash) == 0 { + return nil, false + } + ret = append(ret, 
&HashNode{Index: pairNode.index, Hash: pairHash}) + pairNode = &Node{ + index: pairNode.index, + hash: pairHash, + right: pairNode.right, + left: pairNode.left, + parent: pairNode.parent, + } + } + if tmpNode.right != nil { + tmpNode.right = pairNode + combinednodes = append(tmpNode.hash, pairHash...) + } else { + tmpNode.left = pairNode + combinednodes = append(pairHash, tmpNode.hash...) + } + + tmpHash = sha256.Sum256(combinednodes) + parentHash = tmpHash[:] + + if tmpNode.parent == nil { + // this should not happen + return nil, false + } + + parentNode := tmpNode.parent + if parentNode.hash != nil { + if bytes.Equal(parentNode.hash, parentHash) { + // update cache + m.t[targetIndex] = newNode + return ret, true + } + return nil, false + } + + // parent.hash == nil, new a cache node + parentNode = &Node{ + index: parentNode.index, + hash: parentHash, + left: parentNode.left, + right: parentNode.right, + parent: parentNode.parent, + } + + tmpNode.parent = parentNode + pairNode.parent = parentNode + + tmpNode = parentNode + } + return nil, false +} + +// CollectedPieces returns the collected rawData pieces and true/false to tell whether we got the complete raw data +// slice index is the same with the leaf node index +func (m *MerkleTree) CollectedPieces() ([][]byte, bool) { + if len(m.pieces) == int(m.leafCount) { + return m.pieces, true + } + return m.pieces, false +} + +func (m *MerkleTree) PieceByIndex(targetIndex uint32) ([]byte, bool) { + if int(targetIndex) >= len(m.pieces) { + return nil, false + } + return m.pieces[targetIndex], true +} + +// return rawData as a whole, with true/false to tell if we got the completed rawData +// when false, the returned first value should be nil +func (m *MerkleTree) CompleteRawData() ([]byte, bool) { + if len(m.pieces) < int(m.leafCount) { + return nil, false + } + if len(m.rawData) > 0 { + return m.rawData, true + } + + for _, piece := range m.pieces { + m.rawData = append(m.rawData, piece...) 
+ } + return m.rawData, true +} + +// Completed returns whether the merkle tree has collected all pieces of rawData (all leaf nodes) +// when the MerkleTree is cleared, both len(pieces) and leafCount equal to 0, so it's also marked as 'completed' +// only when MerkleTree is set to non-zero leafCount with less amount of pieces than that leafCount we got false returned +// so when the return value is false, it also indicates that this MerkleTree is collecting pieces +func (m *MerkleTree) Completed() bool { + return len(m.pieces) == int(m.leafCount) +} + +// (0, true) means the first leaf node is cached +// (0, false) means there's no node cached yet +func (m *MerkleTree) LatestLeafIndex() (uint32, bool) { + if len(m.pieces) == 0 { + return 0, false + } + // #nosec G115 - checked 0 case + return uint32(len(m.pieces) - 1), true +} + +func (m *MerkleTree) MinimalProofPathByIndex(index uint32) []uint32 { + proofPath, ok := m.minimalProofPath[index] + if !ok { + return nil + } + return proofPath +} + +func (m *MerkleTree) LeafCount() uint32 { + return m.leafCount +} + +func (m *MerkleTree) NextPieceIndex() (uint32, bool) { + // #nosec G115 + idx := uint32(len(m.pieces)) + if idx >= m.leafCount { + return 0, false + } + + return idx, true +} + +func (m *MerkleTree) RootHash() []byte { + return m.root +} + +// NewMT creates a new merkle tree initialized with the topology from input pieceSize and totalSize +func NewMT(pieceSize, totalSize uint32, root []byte) *MerkleTree { + if totalSize == 0 { + return nil + } + leafCount := totalSize / pieceSize + if totalSize%pieceSize > 0 { + leafCount++ + } + originalLeafCount := leafCount + + ret := &MerkleTree{ + pieces: make([][]byte, 0, leafCount), + leafCount: leafCount, + pieceSize: pieceSize, + minimalProofPath: make(map[uint32][]uint32), + } + + if leafCount == 1 { + ret.t = map[uint32]*Node{0: {index: 1}} + return ret + } + + t := make(map[uint32]*Node) + prevLayersCount := uint32(0) + + for leafCount > 1 { + for i := uint32(0); i < 
leafCount; i += 2 { + idx := i + prevLayersCount + + lNode := t[idx] + if lNode == nil { + lNode = &Node{index: idx} + t[idx] = lNode + } + + if i+1 < leafCount { + rNode := t[idx+1] + if rNode == nil { + rNode = &Node{index: idx + 1} + t[idx+1] = rNode + } + + if lNode.right == nil { + lNode.right = rNode + } + if rNode.left == nil { + rNode.left = lNode + } + + // node pair derived a parent on upper level + parentIdx := i/2 + prevLayersCount + leafCount + parentNode := t[parentIdx] + if parentNode == nil { + parentNode = &Node{index: parentIdx} + t[parentIdx] = parentNode + } + if lNode.parent == nil { + lNode.parent = parentNode + } + if rNode.parent == nil { + rNode.parent = parentNode + } + } else { + // lNode is a single node without pair, linked to no parent at this level, move it up to the end of next upper level + liftedNodeIndex := i/2 + prevLayersCount + leafCount + liftedNode := t[liftedNodeIndex] + if liftedNode == nil { + if lNode.index >= originalLeafCount { + delete(t, lNode.index) + lNode.index = liftedNodeIndex + } + t[liftedNodeIndex] = lNode + } else { + panic("liftedNode must be nil when do lifting") + } + // #nosec G115 + break + } + } + prevLayersCount += leafCount + if leafCount%2 == 1 { + leafCount = leafCount/2 + 1 + } else { + leafCount /= 2 + } + } + t[prevLayersCount] = &Node{ + index: prevLayersCount, + hash: root, + // root node, got no parent or siblings + } + + ret.t = t + ret.root = root + ret.rootIndex = prevLayersCount + + tmpIndex := make(map[uint32]struct{}) + for i := uint32(0); i < ret.leafCount; i++ { + path := ret.ProofPathFromLeafIndex(i) + minimalPath := make([]uint32, 0, len(path)) + for _, pIndex := range path { + if _, ok := tmpIndex[pIndex]; !ok { + tmpIndex[pIndex] = struct{}{} + minimalPath = append(minimalPath, pIndex) + } + } + if len(minimalPath) > 0 { + ret.minimalProofPath[i] = minimalPath + } + } + + return ret +} diff --git a/x/oracle/types/merkletree_test.go b/x/oracle/types/merkletree_test.go new file mode 
100644 index 000000000..f546c088e --- /dev/null +++ b/x/oracle/types/merkletree_test.go @@ -0,0 +1,82 @@ +package types + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestMerkleTree(t *testing.T) { + fmt.Println("test on 6 pieces") + test6pieces() + + fmt.Println("test on 5 pieces") + test5pieces() + + fmt.Println("get proofPath from 6 pieces") + m := NewMT(20, 120) + require.ElementsMatch(t, m.ProofPathFromLeafIndex(0), []uint32{1, 7, 10}) + require.ElementsMatch(t, m.ProofPathFromLeafIndex(1), []uint32{0, 7, 10}) + require.ElementsMatch(t, m.ProofPathFromLeafIndex(2), []uint32{3, 6, 10}) + require.ElementsMatch(t, m.ProofPathFromLeafIndex(3), []uint32{2, 6, 10}) + require.ElementsMatch(t, m.ProofPathFromLeafIndex(4), []uint32{5, 9}) + require.ElementsMatch(t, m.ProofPathFromLeafIndex(5), []uint32{4, 9}) + + fmt.Println("get proofPath from 5 pieces") + m = NewMT(20, 100) + // if limit the provider to upload pieces ordered, then with cache, we only need proof: + // (1,6,4), (-), (3), (2) + require.ElementsMatch(t, m.ProofPathFromLeafIndex(0), []uint32{1, 6, 4}) + require.ElementsMatch(t, m.ProofPathFromLeafIndex(1), []uint32{0, 6, 4}) + require.ElementsMatch(t, m.ProofPathFromLeafIndex(2), []uint32{3, 5, 4}) + require.ElementsMatch(t, m.ProofPathFromLeafIndex(3), []uint32{2, 5, 4}) + require.ElementsMatch(t, m.ProofPathFromLeafIndex(4), []uint32{8}) +} + +func test6pieces() { + m := NewMT(20, 120) + fmt.Println(len(m.t)) + + n := m.t[0] + for n != nil { + fmt.Println("node_index:", n.index) + if n.left != nil { + fmt.Println(" left sibling:", n.left.index) + n = n.parent + continue + } + if n.right != nil { + fmt.Println(" right sibling:", n.right.index) + n = n.parent + continue + } + fmt.Println("this is root node") + break + } + fmt.Println(m.t[8] == nil) + fmt.Println(m.t[10].index == 10) + fmt.Println(m.t[4].parent == m.t[5].parent) + fmt.Println(m.t[4].parent.index == 10) +} + +func test5pieces() { + m := NewMT(20, 100) + 
fmt.Println(len(m.t)) + n := m.t[0] + for n != nil { + fmt.Println("node_index:", n.index) + if n.left != nil { + fmt.Println(" left sibling:", n.left.index) + n = n.parent + continue + } + if n.right != nil { + fmt.Println(" right sibling:", n.right.index) + n = n.parent + continue + } + fmt.Println("this is root node") + break + } +} diff --git a/x/oracle/types/message_create_price.go b/x/oracle/types/message_create_price.go index 9a98da606..0107f2707 100644 --- a/x/oracle/types/message_create_price.go +++ b/x/oracle/types/message_create_price.go @@ -48,3 +48,27 @@ func (msg *MsgCreatePrice) ValidateBasic() error { } return nil } + +func (msg *MsgCreatePrice) IsPhaseOne() bool { + return msg.Phase == AggregationPhaseOne +} + +func (msg *MsgCreatePrice) IsPhaseTwo() bool { + return msg.Phase == AggregationPhaseTwo +} + +// NOTE: this should be the only way a MsgCreatePriceRawData is derived +// GetRawData returns wether this is a message with piece of rawData, and parse rawData piece if true +// NOTE: all method for MsgCreatePriceRawData is assumed that the MsgCreatePriceRawData is derived from MsgCreatePrice by 'feederManager.GetRawData' which had done the basic veirfy, so we don't do that repeatedly + +func (msgP *MsgCreatePriceRawData) PieceIndex() uint32 { + return msgP.Piece.Index +} + +func (msgP *MsgCreatePriceRawData) GetPieceWithProof() *PieceWithProof { + return msgP.Piece +} + +func (msgP *MsgCreatePriceRawData) GetPieceRawData() []byte { + return msgP.Piece.RawData +} diff --git a/x/oracle/types/params.go b/x/oracle/types/params.go index ea6ab14fa..cdce66c30 100644 --- a/x/oracle/types/params.go +++ b/x/oracle/types/params.go @@ -67,6 +67,11 @@ func DefaultParams() Params { Rules: []*RuleSource{ // 0 is reserved {}, + { + // all sources math + SourceIDs: []uint64{0}, + Nom: nil, + }, }, // TokenFeeder describes when a token start to be updated with its price, and the frequency, endTime. 
TokenFeeders: []*TokenFeeder{ @@ -632,3 +637,11 @@ func (p Params) IsSlashingResetUpdate(params *Params) bool { } return false } + +func (p Params) IsNST(tokenID int) bool { + if tokenID >= len(p.Tokens) { + return false + } + token := p.Tokens[tokenID] + return strings.HasPrefix(strings.ToLower(token.AssetID), NSTIDPrefix) +} diff --git a/x/oracle/types/params.pb.go b/x/oracle/types/params.pb.go index e5349e05e..306c29a73 100644 --- a/x/oracle/types/params.pb.go +++ b/x/oracle/types/params.pb.go @@ -84,6 +84,8 @@ type Params struct { MaxSizePrices int32 `protobuf:"varint,11,opt,name=max_size_prices,json=maxSizePrices,proto3" json:"max_size_prices,omitempty"` // slashing defines the slashing related params Slashing *SlashingParams `protobuf:"bytes,12,opt,name=slashing,proto3" json:"slashing,omitempty"` + // piece_size_byte defines how many bytes one piece of raw data includes + PieceSizeByte uint32 `protobuf:"varint,13,opt,name=piece_size_byte,json=pieceSizeByte,proto3" json:"piece_size_byte,omitempty"` } func (m *Params) Reset() { *m = Params{} } @@ -202,6 +204,13 @@ func (m *Params) GetSlashing() *SlashingParams { return nil } +func (m *Params) GetPieceSizeByte() uint32 { + if m != nil { + return m.PieceSizeByte + } + return 0 +} + // slashing related params type SlashingParams struct { // reported_rounds_window defines how many rounds included in one window for performance review of missing report @@ -279,56 +288,57 @@ func init() { func init() { proto.RegisterFile("imuachain/oracle/v1/params.proto", fileDescriptor_a6dd1f02735bb411) } var fileDescriptor_a6dd1f02735bb411 = []byte{ - // 771 bytes of a gzipped FileDescriptorProto + // 796 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xbf, 0x4f, 0xf3, 0x46, - 0x18, 0xc7, 0x63, 0xf2, 0x03, 0xb8, 0x10, 0x0a, 0x07, 0xa5, 0x47, 0x40, 0x8e, 0x45, 0x25, 0x14, - 0xa1, 0x62, 0x8b, 0x50, 0x3a, 0x54, 0x95, 0x2a, 0x42, 0x12, 0x89, 0x4a, 0x84, 0xc8, 0x29, 0xaa, - 
0xd4, 0xc5, 0xba, 0xd8, 0x97, 0xe4, 0xc0, 0xf6, 0x45, 0x3e, 0x1b, 0x02, 0x6b, 0x97, 0x8a, 0xa9, - 0x23, 0x0b, 0x52, 0xa5, 0x2e, 0x1d, 0xfb, 0x67, 0xd0, 0x8d, 0xb1, 0xea, 0x40, 0x2b, 0x18, 0xfa, - 0x17, 0x74, 0xaf, 0x7c, 0xb6, 0x03, 0x89, 0xd2, 0x57, 0x7a, 0xdf, 0x25, 0xf1, 0xdd, 0xf3, 0xf9, - 0x3e, 0xdf, 0xe7, 0x79, 0x7c, 0x67, 0xa0, 0x50, 0x27, 0xc0, 0x66, 0x1f, 0x53, 0x57, 0x63, 0x1e, - 0x36, 0x6d, 0xa2, 0x5d, 0xee, 0x69, 0x03, 0xec, 0x61, 0x87, 0xab, 0x03, 0x8f, 0xf9, 0x0c, 0xae, - 0x8c, 0x08, 0x35, 0x22, 0xd4, 0xcb, 0xbd, 0xe2, 0x32, 0x76, 0xa8, 0xcb, 0x34, 0xf1, 0x1b, 0x71, - 0xc5, 0xd5, 0x1e, 0xeb, 0x31, 0xf1, 0xa8, 0x85, 0x4f, 0xf1, 0xae, 0xdc, 0x63, 0xac, 0x67, 0x13, - 0x4d, 0xac, 0x3a, 0x41, 0x57, 0xb3, 0x02, 0x0f, 0xfb, 0x94, 0xb9, 0x49, 0x7c, 0x9a, 0x3f, 0x75, - 0xbb, 0x89, 0x7e, 0x7b, 0x5a, 0xdc, 0x67, 0x17, 0xc4, 0x35, 0xba, 0x84, 0x58, 0xc4, 0x8b, 0xb8, - 0xad, 0xdf, 0x33, 0x20, 0xd7, 0x12, 0x65, 0xc3, 0x0a, 0xc8, 0x09, 0x01, 0x47, 0x92, 0x92, 0x2e, - 0xe7, 0x2b, 0x45, 0x75, 0x4a, 0x07, 0xea, 0x51, 0xb8, 0xd6, 0x63, 0x32, 0xd4, 0x88, 0xa4, 0x1c, - 0xcd, 0xbc, 0x43, 0xf3, 0x6d, 0x88, 0xe8, 0x31, 0x09, 0x0f, 0xc0, 0x2c, 0x67, 0x81, 0x67, 0x12, - 0x8e, 0xd2, 0x42, 0xb4, 0x31, 0x55, 0xd4, 0x16, 0x8c, 0x9e, 0xb0, 0xf0, 0x00, 0x64, 0xbd, 0xc0, - 0x26, 0x1c, 0x65, 0x84, 0xa8, 0x34, 0x55, 0xa4, 0x07, 0x36, 0x89, 0x85, 0x11, 0x0d, 0xeb, 0xa0, - 0xf0, 0xb6, 0x6d, 0x8e, 0xb2, 0x42, 0xae, 0xfc, 0x7f, 0xa1, 0x0d, 0x01, 0xea, 0x0b, 0xfe, 0xeb, - 0x82, 0xc3, 0x0d, 0x30, 0xef, 0xe0, 0xa1, 0xe1, 0x32, 0xd7, 0x24, 0x28, 0xa7, 0x48, 0xe5, 0xac, - 0x3e, 0xe7, 0xe0, 0x61, 0x33, 0x5c, 0xc3, 0x12, 0xc8, 0xfb, 0x7d, 0x8f, 0xf0, 0x3e, 0xb3, 0x2d, - 0x03, 0xa3, 0x59, 0x11, 0x06, 0xa3, 0xad, 0xc3, 0x71, 0xa0, 0x83, 0xe6, 0x26, 0x80, 0x2a, 0xfc, - 0x02, 0x64, 0x1c, 0x66, 0x11, 0x34, 0xaf, 0x48, 0xe5, 0xc5, 0xca, 0xd6, 0xf4, 0xc9, 0x33, 0x97, - 0x13, 0x97, 0x07, 0xfc, 0x84, 0x59, 0x44, 0x17, 0x3c, 0xdc, 0x04, 0x20, 0x2c, 0xcb, 0x22, 0xbe, - 0x41, 0x2d, 0x04, 0x46, 0x75, 0xd5, 
0x88, 0x7f, 0x6c, 0xc1, 0x6d, 0xf0, 0x51, 0x18, 0xe5, 0xf4, - 0x86, 0x18, 0x03, 0x8f, 0x86, 0x13, 0xcf, 0x0b, 0xa4, 0xe0, 0xe0, 0x61, 0x9b, 0xde, 0x90, 0x96, - 0xd8, 0x84, 0x5f, 0x83, 0x39, 0x6e, 0x63, 0xde, 0xa7, 0x6e, 0x0f, 0x2d, 0x28, 0x52, 0x39, 0x5f, - 0xf9, 0x74, 0xfa, 0x2b, 0x89, 0xa1, 0xe8, 0xc0, 0xe8, 0x23, 0xd1, 0x97, 0x99, 0xbb, 0x9f, 0x4b, - 0xa9, 0xad, 0x7f, 0xd3, 0x60, 0x71, 0x1c, 0x81, 0x9f, 0x83, 0x35, 0x8f, 0x0c, 0x98, 0xe7, 0x13, - 0xcb, 0xf0, 0x58, 0xe0, 0x5a, 0xdc, 0xb8, 0xa2, 0xae, 0xc5, 0xae, 0x90, 0xa4, 0x48, 0xe5, 0xb4, - 0xbe, 0x9a, 0x44, 0x75, 0x11, 0xfc, 0x4e, 0xc4, 0xe0, 0x39, 0xf8, 0xc4, 0xa1, 0xae, 0x31, 0x52, - 0x0e, 0x88, 0x97, 0xc8, 0x66, 0x14, 0xa9, 0xbc, 0x50, 0xdd, 0x7f, 0x78, 0x2a, 0xa5, 0xfe, 0x7c, - 0x2a, 0x6d, 0xf7, 0xa8, 0xdf, 0x0f, 0x3a, 0xaa, 0xc9, 0x1c, 0xcd, 0x64, 0xdc, 0x61, 0x3c, 0xfe, - 0xdb, 0xe5, 0xd6, 0x85, 0xe6, 0x5f, 0x0f, 0x08, 0x57, 0x6b, 0xc4, 0xfc, 0xf5, 0x9f, 0xdf, 0x76, - 0x24, 0x7d, 0xd5, 0xa1, 0xae, 0x1e, 0xa7, 0x6c, 0x11, 0x2f, 0xf6, 0x32, 0xc1, 0x7a, 0xd4, 0xa0, - 0xe1, 0x50, 0xce, 0x8d, 0x73, 0x4c, 0x6d, 0x23, 0xb9, 0x6b, 0x28, 0x2d, 0x86, 0xb1, 0xae, 0x46, - 0x97, 0x51, 0x4d, 0x2e, 0xa3, 0x5a, 0x8b, 0x81, 0x6a, 0x21, 0x2c, 0xe4, 0xee, 0xaf, 0x92, 0x14, - 0x59, 0xac, 0x45, 0xa9, 0x4e, 0x28, 0xe7, 0xdf, 0x60, 0x6a, 0x27, 0x18, 0x74, 0x80, 0x9c, 0x98, - 0x60, 0x9b, 0x9a, 0x94, 0x05, 0x93, 0x4e, 0x99, 0xf7, 0x74, 0xda, 0x88, 0x9d, 0x92, 0x74, 0x13, - 0x76, 0x48, 0xbc, 0x1a, 0xa3, 0xeb, 0x61, 0x33, 0xdc, 0x79, 0xb5, 0x45, 0xd9, 0x0f, 0x1f, 0xe0, - 0x9a, 0x48, 0xda, 0x88, 0x73, 0x8e, 0xac, 0x77, 0x7e, 0x90, 0x40, 0x61, 0xec, 0x70, 0xc2, 0xaf, - 0x40, 0xf1, 0xe8, 0xb4, 0xd9, 0xae, 0x37, 0xdb, 0x67, 0x6d, 0xe3, 0xe4, 0xb4, 0x56, 0x37, 0xce, - 0x9a, 0xed, 0x56, 0xfd, 0xe8, 0xb8, 0x71, 0x5c, 0xaf, 0x2d, 0xa5, 0x8a, 0x9b, 0xb7, 0xf7, 0x0a, - 0x1a, 0x93, 0x9c, 0xb9, 0x7c, 0x40, 0x4c, 0xda, 0xa5, 0xc4, 0x82, 0x2a, 0x58, 0x99, 0x50, 0x1f, - 0xb6, 0x0f, 0x5b, 0x4b, 0x52, 0xf1, 0xe3, 0xdb, 0x7b, 0x65, 0x79, 0x4c, 
0x16, 0x06, 0x8a, 0x99, - 0x1f, 0x7f, 0x91, 0x53, 0xd5, 0xc6, 0xc3, 0xb3, 0x2c, 0x3d, 0x3e, 0xcb, 0xd2, 0xdf, 0xcf, 0xb2, - 0xf4, 0xd3, 0x8b, 0x9c, 0x7a, 0x7c, 0x91, 0x53, 0x7f, 0xbc, 0xc8, 0xa9, 0xef, 0x3f, 0x7b, 0xd3, - 0x64, 0x78, 0xac, 0x77, 0x87, 0xd7, 0x37, 0xda, 0xeb, 0xf7, 0x71, 0x98, 0x7c, 0x21, 0x45, 0xbb, - 0x9d, 0x9c, 0x98, 0xfd, 0xfe, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xd6, 0xb5, 0x80, 0xcf, 0xe2, - 0x05, 0x00, 0x00, + 0x18, 0xc7, 0xe3, 0x37, 0x3f, 0x5e, 0xde, 0x0b, 0x79, 0xfb, 0x72, 0x50, 0x7a, 0x04, 0xe4, 0x58, + 0x54, 0x42, 0x11, 0x2a, 0xb6, 0x08, 0xa5, 0x43, 0x55, 0xa9, 0x22, 0x24, 0x91, 0xa8, 0x44, 0x88, + 0x9c, 0xa2, 0x4a, 0x5d, 0xac, 0x8b, 0x7d, 0x49, 0x0e, 0x6c, 0x9f, 0xe5, 0xb3, 0x21, 0x61, 0xed, + 0x52, 0x31, 0x75, 0x64, 0x41, 0xaa, 0xd4, 0xa5, 0x63, 0xff, 0x0c, 0x46, 0xc6, 0xaa, 0x03, 0xad, + 0x60, 0xe8, 0xdc, 0xa1, 0x7b, 0xe5, 0xb3, 0x1d, 0x48, 0x94, 0x56, 0xea, 0xbb, 0x24, 0xbe, 0xe7, + 0xf9, 0x7c, 0x9f, 0xef, 0x73, 0x8f, 0xef, 0x0c, 0x14, 0xea, 0x84, 0xd8, 0x1c, 0x62, 0xea, 0x6a, + 0xcc, 0xc7, 0xa6, 0x4d, 0xb4, 0x8b, 0x5d, 0xcd, 0xc3, 0x3e, 0x76, 0xb8, 0xea, 0xf9, 0x2c, 0x60, + 0x70, 0x79, 0x42, 0xa8, 0x31, 0xa1, 0x5e, 0xec, 0x96, 0x97, 0xb0, 0x43, 0x5d, 0xa6, 0x89, 0xdf, + 0x98, 0x2b, 0xaf, 0x0c, 0xd8, 0x80, 0x89, 0x47, 0x2d, 0x7a, 0x4a, 0xa2, 0xf2, 0x80, 0xb1, 0x81, + 0x4d, 0x34, 0xb1, 0xea, 0x85, 0x7d, 0xcd, 0x0a, 0x7d, 0x1c, 0x50, 0xe6, 0xa6, 0xf9, 0x79, 0xfe, + 0xd4, 0xed, 0xa7, 0xfa, 0xad, 0x79, 0xf9, 0x80, 0x9d, 0x13, 0xd7, 0xe8, 0x13, 0x62, 0x11, 0x3f, + 0xe6, 0x36, 0xff, 0xca, 0x81, 0x42, 0x47, 0xb4, 0x0d, 0x6b, 0xa0, 0x20, 0x04, 0x1c, 0x49, 0x4a, + 0xb6, 0x5a, 0xac, 0x95, 0xd5, 0x39, 0x3b, 0x50, 0x0f, 0xa3, 0xb5, 0x9e, 0x90, 0x91, 0x46, 0x14, + 0xe5, 0xe8, 0xd5, 0x7f, 0x68, 0xbe, 0x8e, 0x10, 0x3d, 0x21, 0xe1, 0x3e, 0x78, 0xcd, 0x59, 0xe8, + 0x9b, 0x84, 0xa3, 0xac, 0x10, 0xad, 0xcf, 0x15, 0x75, 0x05, 0xa3, 0xa7, 0x2c, 0xdc, 0x07, 0x79, + 0x3f, 0xb4, 0x09, 0x47, 0x39, 0x21, 0xaa, 0xcc, 0x15, 0xe9, 0xa1, 0x4d, 0x12, 0x61, 0x4c, 
0xc3, + 0x26, 0x28, 0xbd, 0xdc, 0x36, 0x47, 0x79, 0x21, 0x57, 0xfe, 0xbd, 0xd1, 0x96, 0x00, 0xf5, 0xc5, + 0xe0, 0x79, 0xc1, 0xe1, 0x3a, 0x78, 0xe3, 0xe0, 0x91, 0xe1, 0x32, 0xd7, 0x24, 0xa8, 0xa0, 0x48, + 0xd5, 0xbc, 0xbe, 0xe0, 0xe0, 0x51, 0x3b, 0x5a, 0xc3, 0x0a, 0x28, 0x06, 0x43, 0x9f, 0xf0, 0x21, + 0xb3, 0x2d, 0x03, 0xa3, 0xd7, 0x22, 0x0d, 0x26, 0xa1, 0x83, 0x69, 0xa0, 0x87, 0x16, 0x66, 0x80, + 0x3a, 0xfc, 0x0c, 0xe4, 0x1c, 0x66, 0x11, 0xf4, 0x46, 0x91, 0xaa, 0x6f, 0x6b, 0x9b, 0xf3, 0x27, + 0xcf, 0x5c, 0x4e, 0x5c, 0x1e, 0xf2, 0x63, 0x66, 0x11, 0x5d, 0xf0, 0x70, 0x03, 0x80, 0xa8, 0x2d, + 0x8b, 0x04, 0x06, 0xb5, 0x10, 0x98, 0xf4, 0xd5, 0x20, 0xc1, 0x91, 0x05, 0xb7, 0xc0, 0x07, 0x51, + 0x96, 0xd3, 0x2b, 0x62, 0x78, 0x3e, 0x8d, 0x26, 0x5e, 0x14, 0x48, 0xc9, 0xc1, 0xa3, 0x2e, 0xbd, + 0x22, 0x1d, 0x11, 0x84, 0x5f, 0x82, 0x05, 0x6e, 0x63, 0x3e, 0xa4, 0xee, 0x00, 0x2d, 0x2a, 0x52, + 0xb5, 0x58, 0xfb, 0x78, 0xfe, 0x2b, 0x49, 0xa0, 0xf8, 0xc0, 0xe8, 0x13, 0x51, 0x64, 0xe4, 0x51, + 0x62, 0x92, 0xd8, 0xaa, 0x37, 0x0e, 0x08, 0x2a, 0x29, 0x52, 0xb5, 0xa4, 0x97, 0x44, 0x38, 0xb2, + 0xaa, 0x8f, 0x03, 0xf2, 0x79, 0xee, 0xe6, 0xc7, 0x4a, 0x66, 0xf3, 0xef, 0x2c, 0x78, 0x3b, 0x5d, + 0x0a, 0x7e, 0x0a, 0x56, 0x7d, 0xe2, 0x31, 0x3f, 0x20, 0x96, 0xe1, 0xb3, 0xd0, 0xb5, 0xb8, 0x71, + 0x49, 0x5d, 0x8b, 0x5d, 0x22, 0x49, 0x91, 0xaa, 0x59, 0x7d, 0x25, 0xcd, 0xea, 0x22, 0xf9, 0x8d, + 0xc8, 0xc1, 0x33, 0xf0, 0x91, 0x43, 0x5d, 0x63, 0xa2, 0xf4, 0x88, 0x9f, 0xca, 0x5e, 0x29, 0x52, + 0x75, 0xb1, 0xbe, 0x77, 0xf7, 0x50, 0xc9, 0xfc, 0xf6, 0x50, 0xd9, 0x1a, 0xd0, 0x60, 0x18, 0xf6, + 0x54, 0x93, 0x39, 0x9a, 0xc9, 0xb8, 0xc3, 0x78, 0xf2, 0xb7, 0xc3, 0xad, 0x73, 0x2d, 0x18, 0x7b, + 0x84, 0xab, 0x0d, 0x62, 0xfe, 0xfc, 0xe7, 0x2f, 0xdb, 0x92, 0xbe, 0xe2, 0x50, 0x57, 0x4f, 0x4a, + 0x76, 0x88, 0x9f, 0x78, 0x99, 0x60, 0x2d, 0x1e, 0x84, 0xe1, 0x50, 0xce, 0x8d, 0x33, 0x4c, 0x6d, + 0x23, 0xbd, 0x93, 0x28, 0x2b, 0x86, 0xb6, 0xa6, 0xc6, 0x97, 0x56, 0x4d, 0x2f, 0xad, 0xda, 0x48, + 0x80, 0x7a, 0x29, 0x6a, 0xe4, 
0xe6, 0xf7, 0x8a, 0x14, 0x5b, 0xac, 0xc6, 0xa5, 0x8e, 0x29, 0xe7, + 0x5f, 0x61, 0x6a, 0xa7, 0x18, 0x74, 0x80, 0x9c, 0x9a, 0x60, 0x9b, 0x9a, 0x94, 0x85, 0xb3, 0x4e, + 0xb9, 0xff, 0xe9, 0xb4, 0x9e, 0x38, 0xa5, 0xe5, 0x66, 0xec, 0x90, 0x78, 0x85, 0x46, 0xdf, 0xc7, + 0x66, 0x14, 0x79, 0xb6, 0x45, 0xf9, 0xf7, 0x1f, 0xe0, 0xaa, 0x28, 0xda, 0x4a, 0x6a, 0x4e, 0xac, + 0xb7, 0xbf, 0x93, 0x40, 0x69, 0xea, 0x10, 0xc3, 0x2f, 0x40, 0xf9, 0xf0, 0xa4, 0xdd, 0x6d, 0xb6, + 0xbb, 0xa7, 0x5d, 0xe3, 0xf8, 0xa4, 0xd1, 0x34, 0x4e, 0xdb, 0xdd, 0x4e, 0xf3, 0xf0, 0xa8, 0x75, + 0xd4, 0x6c, 0xbc, 0xcb, 0x94, 0x37, 0xae, 0x6f, 0x15, 0x34, 0x25, 0x39, 0x75, 0xb9, 0x47, 0x4c, + 0xda, 0xa7, 0xc4, 0x82, 0x2a, 0x58, 0x9e, 0x51, 0x1f, 0x74, 0x0f, 0x3a, 0xef, 0xa4, 0xf2, 0x87, + 0xd7, 0xb7, 0xca, 0xd2, 0x94, 0x2c, 0x4a, 0x94, 0x73, 0xdf, 0xff, 0x24, 0x67, 0xea, 0xad, 0xbb, + 0x47, 0x59, 0xba, 0x7f, 0x94, 0xa5, 0x3f, 0x1e, 0x65, 0xe9, 0x87, 0x27, 0x39, 0x73, 0xff, 0x24, + 0x67, 0x7e, 0x7d, 0x92, 0x33, 0xdf, 0x7e, 0xf2, 0x62, 0x93, 0xd1, 0xf1, 0xdf, 0x19, 0x8d, 0xaf, + 0xb4, 0xe7, 0xef, 0xe8, 0x28, 0xfd, 0x92, 0x8a, 0xed, 0xf6, 0x0a, 0x62, 0xf6, 0x7b, 0xff, 0x04, + 0x00, 0x00, 0xff, 0xff, 0x35, 0xdb, 0xf6, 0x95, 0x0a, 0x06, 0x00, 0x00, } func (m *Params) Marshal() (dAtA []byte, err error) { @@ -351,6 +361,11 @@ func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.PieceSizeByte != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.PieceSizeByte)) + i-- + dAtA[i] = 0x68 + } if m.Slashing != nil { { size, err := m.Slashing.MarshalToSizedBuffer(dAtA[:i]) @@ -599,6 +614,9 @@ func (m *Params) Size() (n int) { l = m.Slashing.Size() n += 1 + l + sovParams(uint64(l)) } + if m.PieceSizeByte != 0 { + n += 1 + sovParams(uint64(m.PieceSizeByte)) + } return n } @@ -977,6 +995,25 @@ func (m *Params) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PieceSizeByte", wireType) + } + 
m.PieceSizeByte = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PieceSizeByte |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) diff --git a/x/oracle/types/rawdata_nst.pb.go b/x/oracle/types/rawdata_nst.pb.go new file mode 100644 index 000000000..5e1dbfa38 --- /dev/null +++ b/x/oracle/types/rawdata_nst.pb.go @@ -0,0 +1,527 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: imuachain/oracle/v1/rawdata_nst.proto + +package types + +import ( + fmt "fmt" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// NSTKV key-value pair to tell staker_index and its corresponding balance change +type NSTKV struct { + // staker index for a nst defined on imuachain side + StakerIndex uint32 `protobuf:"varint,1,opt,name=staker_index,json=stakerIndex,proto3" json:"staker_index,omitempty"` + // balance change since last update + BalanceChange int64 `protobuf:"varint,2,opt,name=balance_change,json=balanceChange,proto3" json:"balance_change,omitempty"` +} + +func (m *NSTKV) Reset() { *m = NSTKV{} } +func (m *NSTKV) String() string { return proto.CompactTextString(m) } +func (*NSTKV) ProtoMessage() {} +func (*NSTKV) Descriptor() ([]byte, []int) { + return fileDescriptor_77c72bbd82fbb1be, []int{0} +} +func (m *NSTKV) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NSTKV) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_NSTKV.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *NSTKV) XXX_Merge(src proto.Message) { + xxx_messageInfo_NSTKV.Merge(m, src) +} +func (m *NSTKV) XXX_Size() int { + return m.Size() +} +func (m *NSTKV) XXX_DiscardUnknown() { + xxx_messageInfo_NSTKV.DiscardUnknown(m) +} + +var xxx_messageInfo_NSTKV proto.InternalMessageInfo + +func (m *NSTKV) GetStakerIndex() uint32 { + if m != nil { + return m.StakerIndex + } + return 0 +} + +func (m *NSTKV) GetBalanceChange() int64 { + if m != nil { + return m.BalanceChange + } + return 0 +} + +// RawDataNST represents balance changes of all stakers for a NST +type RawDataNST struct { + // NSTKV use array to describe {staker_indx: balance_change} for all stakers whose balance had changed + NstBalanceChanges []*NSTKV `protobuf:"bytes,1,rep,name=nst_balance_changes,json=nstBalanceChanges,proto3" json:"nst_balance_changes,omitempty"` +} + +func (m 
*RawDataNST) Reset() { *m = RawDataNST{} } +func (m *RawDataNST) String() string { return proto.CompactTextString(m) } +func (*RawDataNST) ProtoMessage() {} +func (*RawDataNST) Descriptor() ([]byte, []int) { + return fileDescriptor_77c72bbd82fbb1be, []int{1} +} +func (m *RawDataNST) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawDataNST) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RawDataNST.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RawDataNST) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawDataNST.Merge(m, src) +} +func (m *RawDataNST) XXX_Size() int { + return m.Size() +} +func (m *RawDataNST) XXX_DiscardUnknown() { + xxx_messageInfo_RawDataNST.DiscardUnknown(m) +} + +var xxx_messageInfo_RawDataNST proto.InternalMessageInfo + +func (m *RawDataNST) GetNstBalanceChanges() []*NSTKV { + if m != nil { + return m.NstBalanceChanges + } + return nil +} + +func init() { + proto.RegisterType((*NSTKV)(nil), "imuachain.oracle.v1.NSTKV") + proto.RegisterType((*RawDataNST)(nil), "imuachain.oracle.v1.RawDataNST") +} + +func init() { + proto.RegisterFile("imuachain/oracle/v1/rawdata_nst.proto", fileDescriptor_77c72bbd82fbb1be) +} + +var fileDescriptor_77c72bbd82fbb1be = []byte{ + // 254 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xcd, 0xcc, 0x2d, 0x4d, + 0x4c, 0xce, 0x48, 0xcc, 0xcc, 0xd3, 0xcf, 0x2f, 0x4a, 0x4c, 0xce, 0x49, 0xd5, 0x2f, 0x33, 0xd4, + 0x2f, 0x4a, 0x2c, 0x4f, 0x49, 0x2c, 0x49, 0x8c, 0xcf, 0x2b, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x12, 0x86, 0x2b, 0xd3, 0x83, 0x28, 0xd3, 0x2b, 0x33, 0x54, 0x0a, 0xe4, 0x62, 0xf5, + 0x0b, 0x0e, 0xf1, 0x0e, 0x13, 0x52, 0xe4, 0xe2, 0x29, 0x2e, 0x49, 0xcc, 0x4e, 0x2d, 0x8a, 0xcf, + 0xcc, 0x4b, 0x49, 0xad, 0x90, 0x60, 0x54, 0x60, 0xd4, 
0xe0, 0x0d, 0xe2, 0x86, 0x88, 0x79, 0x82, + 0x84, 0x84, 0x54, 0xb9, 0xf8, 0x92, 0x12, 0x73, 0x12, 0xf3, 0x92, 0x53, 0xe3, 0x93, 0x33, 0x12, + 0xf3, 0xd2, 0x53, 0x25, 0x98, 0x14, 0x18, 0x35, 0x98, 0x83, 0x78, 0xa1, 0xa2, 0xce, 0x60, 0x41, + 0xa5, 0x08, 0x2e, 0xae, 0xa0, 0xc4, 0x72, 0x97, 0xc4, 0x92, 0x44, 0xbf, 0xe0, 0x10, 0x21, 0x2f, + 0x2e, 0xe1, 0xbc, 0xe2, 0x92, 0x78, 0x54, 0x8d, 0xc5, 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, + 0x52, 0x7a, 0x58, 0xdc, 0xa4, 0x07, 0x76, 0x50, 0x90, 0x60, 0x5e, 0x71, 0x89, 0x13, 0xb2, 0xc1, + 0xc5, 0x4e, 0x6e, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, + 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0xa5, 0x93, 0x9e, + 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x0f, 0x32, 0x52, 0xb7, 0xa2, 0xb2, 0x4a, + 0x1f, 0x11, 0x2c, 0x15, 0xb0, 0x80, 0x29, 0xa9, 0x2c, 0x48, 0x2d, 0x4e, 0x62, 0x03, 0x07, 0x88, + 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x28, 0x64, 0x99, 0x8c, 0x39, 0x01, 0x00, 0x00, +} + +func (m *NSTKV) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NSTKV) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NSTKV) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BalanceChange != 0 { + i = encodeVarintRawdataNst(dAtA, i, uint64(m.BalanceChange)) + i-- + dAtA[i] = 0x10 + } + if m.StakerIndex != 0 { + i = encodeVarintRawdataNst(dAtA, i, uint64(m.StakerIndex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RawDataNST) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawDataNST) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RawDataNST) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NstBalanceChanges) > 0 { + for iNdEx := len(m.NstBalanceChanges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NstBalanceChanges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRawdataNst(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintRawdataNst(dAtA []byte, offset int, v uint64) int { + offset -= sovRawdataNst(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *NSTKV) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StakerIndex != 0 { + n += 1 + sovRawdataNst(uint64(m.StakerIndex)) + } + if m.BalanceChange != 0 { + n += 1 + sovRawdataNst(uint64(m.BalanceChange)) + } + return n +} + +func (m *RawDataNST) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NstBalanceChanges) > 0 { + for _, e := range m.NstBalanceChanges { + l = e.Size() + n += 1 + l + sovRawdataNst(uint64(l)) + } + } + return n +} + +func sovRawdataNst(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRawdataNst(x uint64) (n int) { + return sovRawdataNst(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *NSTKV) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRawdataNst + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NSTKV: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NSTKV: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StakerIndex", wireType) + } + m.StakerIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRawdataNst + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StakerIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BalanceChange", wireType) + } + m.BalanceChange = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRawdataNst + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.BalanceChange |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRawdataNst(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRawdataNst + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RawDataNST) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRawdataNst + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawDataNST: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawDataNST: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field NstBalanceChanges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRawdataNst + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRawdataNst + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRawdataNst + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NstBalanceChanges = append(m.NstBalanceChanges, &NSTKV{}) + if err := m.NstBalanceChanges[len(m.NstBalanceChanges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRawdataNst(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRawdataNst + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRawdataNst(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRawdataNst + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRawdataNst + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRawdataNst + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRawdataNst + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupRawdataNst + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthRawdataNst + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthRawdataNst = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRawdataNst = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRawdataNst = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/oracle/types/tx.pb.go b/x/oracle/types/tx.pb.go index 8863e5f0b..dabeaca69 100644 --- a/x/oracle/types/tx.pb.go +++ b/x/oracle/types/tx.pb.go @@ -31,6 +31,38 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +// AggregationPhase defines the aggregation phase of the message +type AggregationPhase int32 + +const ( + // MESSAGE_PHASE_NON is the default value, which means the message is not of type 2-phases aggregation + AggregationPhaseUnspecified AggregationPhase = 0 + // MESSAGE_PHASE_ONE is the first phase of the 2-phases aggregation + AggregationPhaseOne AggregationPhase = 1 + // MESSAGE_PHASE_TWO is the second phase of the 2-phases aggregation + AggregationPhaseTwo AggregationPhase = 2 +) + +var AggregationPhase_name = map[int32]string{ + 0: "AGGREGATION_PHASE_UNSPECIFIED", + 1: "AGGREGATION_PHASE_ONE", + 2: "AGGREGATION_PHASE_TWO", +} + +var AggregationPhase_value = map[string]int32{ + "AGGREGATION_PHASE_UNSPECIFIED": 0, + "AGGREGATION_PHASE_ONE": 1, + "AGGREGATION_PHASE_TWO": 2, +} + +func (x AggregationPhase) String() string { + return proto.EnumName(AggregationPhase_name, int32(x)) +} + +func (AggregationPhase) EnumDescriptor() 
([]byte, []int) { + return fileDescriptor_8a8b79b15e755ae2, []int{0} +} + // MsgCreatePrice provide the price updating message type MsgCreatePrice struct { // creator tells which is the message sender and should sign this message @@ -43,6 +75,9 @@ type MsgCreatePrice struct { BasedBlock uint64 `protobuf:"varint,4,opt,name=based_block,json=basedBlock,proto3" json:"based_block,omitempty"` // nonce represents the unique number to disginguish duplicated messages Nonce int32 `protobuf:"varint,5,opt,name=nonce,proto3" json:"nonce,omitempty"` + // true: this message includes data of {rawdata, proof} need to be verified based on consensused root + // false: this message includes data need to get consensus based on voting power + Phase AggregationPhase `protobuf:"varint,6,opt,name=phase,proto3,enum=imuachain.oracle.v1.AggregationPhase" json:"phase,omitempty"` } func (m *MsgCreatePrice) Reset() { *m = MsgCreatePrice{} } @@ -113,6 +148,13 @@ func (m *MsgCreatePrice) GetNonce() int32 { return 0 } +func (m *MsgCreatePrice) GetPhase() AggregationPhase { + if m != nil { + return m.Phase + } + return AggregationPhaseUnspecified +} + // MsgCreatePriceResponse type MsgCreatePriceResponse struct { } @@ -245,6 +287,7 @@ func (m *MsgUpdateParamsResponse) XXX_DiscardUnknown() { var xxx_messageInfo_MsgUpdateParamsResponse proto.InternalMessageInfo func init() { + proto.RegisterEnum("imuachain.oracle.v1.AggregationPhase", AggregationPhase_name, AggregationPhase_value) proto.RegisterType((*MsgCreatePrice)(nil), "imuachain.oracle.v1.MsgCreatePrice") proto.RegisterType((*MsgCreatePriceResponse)(nil), "imuachain.oracle.v1.MsgCreatePriceResponse") proto.RegisterType((*MsgUpdateParams)(nil), "imuachain.oracle.v1.MsgUpdateParams") @@ -254,41 +297,49 @@ func init() { func init() { proto.RegisterFile("imuachain/oracle/v1/tx.proto", fileDescriptor_8a8b79b15e755ae2) } var fileDescriptor_8a8b79b15e755ae2 = []byte{ - // 532 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xb1, 0x6f, 0xd3, 0x4e, - 0x18, 0xcd, 0xfd, 0xd2, 0xe4, 0xd7, 0x5c, 0x2a, 0x10, 0x47, 0x44, 0xdd, 0x00, 0x8e, 0x49, 0x19, - 0x42, 0x68, 0x6c, 0x35, 0x48, 0x15, 0x02, 0x09, 0x09, 0x83, 0x2a, 0x75, 0x88, 0x84, 0x5c, 0xc1, - 0xc0, 0x12, 0x9d, 0xed, 0xc3, 0xb1, 0x1a, 0xfb, 0xac, 0x3b, 0xa7, 0x4a, 0x18, 0x19, 0x99, 0xf8, - 0x33, 0x18, 0x33, 0xf4, 0x1f, 0x60, 0xab, 0x98, 0xaa, 0x4e, 0x4c, 0x15, 0x4a, 0x86, 0x88, 0xff, - 0x02, 0xf9, 0xce, 0x6e, 0x9a, 0xca, 0x48, 0x5d, 0x2c, 0x7f, 0xef, 0xbd, 0xef, 0xbe, 0xf7, 0xbd, - 0xd3, 0xc1, 0x07, 0x7e, 0x30, 0xc2, 0xce, 0x00, 0xfb, 0xa1, 0x41, 0x19, 0x76, 0x86, 0xc4, 0x38, - 0xde, 0x35, 0xe2, 0xb1, 0x1e, 0x31, 0x1a, 0x53, 0x74, 0xf7, 0x92, 0xd5, 0x25, 0xab, 0x1f, 0xef, - 0xd6, 0xef, 0xe0, 0xc0, 0x0f, 0xa9, 0x21, 0xbe, 0x52, 0x57, 0xdf, 0x74, 0x28, 0x0f, 0x28, 0x37, - 0x02, 0xee, 0x25, 0xfd, 0x01, 0xf7, 0x52, 0x62, 0x4b, 0x12, 0x7d, 0x51, 0x19, 0xb2, 0x48, 0xa9, - 0x9a, 0x47, 0x3d, 0x2a, 0xf1, 0xe4, 0x2f, 0x45, 0xb5, 0x3c, 0x3f, 0x11, 0x66, 0x38, 0xc8, 0xfa, - 0x1a, 0xb9, 0x0a, 0xe6, 0x3b, 0x44, 0x0a, 0x9a, 0x7f, 0x00, 0xbc, 0xd5, 0xe3, 0xde, 0x1b, 0x46, - 0x70, 0x4c, 0xde, 0x25, 0x04, 0x7a, 0x09, 0xff, 0x77, 0x92, 0x92, 0x32, 0x05, 0x68, 0xa0, 0x55, - 0x31, 0x1f, 0x9d, 0x9f, 0x74, 0x1e, 0xa6, 0x76, 0x3e, 0xe0, 0xa1, 0xef, 0x26, 0xdc, 0x6b, 0xd7, - 0x65, 0x84, 0xf3, 0xc3, 0x98, 0xf9, 0xa1, 0x67, 0x65, 0x1d, 0xe8, 0x09, 0xac, 0x7c, 0x22, 0xc4, - 0x25, 0xac, 0xef, 0xbb, 0xca, 0x7f, 0x1a, 0x68, 0xad, 0x99, 0x1b, 0xb3, 0x8b, 0xc6, 0xfa, 0xbe, - 0x00, 0x0f, 0xde, 0x5a, 0xeb, 0x92, 0x3e, 0x70, 0xd1, 0x73, 0x58, 0x16, 0x4e, 0xb8, 0x52, 0xd4, - 0x8a, 0xad, 0x6a, 0x57, 0xd3, 0x73, 0x02, 0xd4, 0x85, 0xa7, 0x43, 0x3a, 0x62, 0x0e, 0xb1, 0x52, - 0x3d, 0x6a, 0xc0, 0xaa, 0x8d, 0x39, 0x71, 0xfb, 0xf6, 0x90, 0x3a, 0x47, 0xca, 0x5a, 0x32, 0xc6, - 0x82, 0x02, 0x32, 0x13, 0x04, 0xd5, 0x60, 0x29, 0xa4, 0xa1, 0x43, 0x94, 0x92, 0x06, 0x5a, 0x25, - 0x4b, 0x16, 0x4d, 0x05, 0xde, 0x5b, 0x5d, 0xd5, 0x22, 0x3c, 0xa2, 0x21, 
0x27, 0xcd, 0x1f, 0x00, - 0xde, 0xee, 0x71, 0xef, 0x7d, 0xe4, 0x26, 0x94, 0x08, 0x10, 0xed, 0xc1, 0x0a, 0x1e, 0xc5, 0x03, - 0xca, 0xfc, 0x78, 0x92, 0x06, 0xa1, 0x9c, 0x9f, 0x74, 0x6a, 0x69, 0x10, 0xab, 0xfb, 0x2f, 0xa5, - 0xe8, 0x15, 0x2c, 0xcb, 0x2b, 0x10, 0xeb, 0x57, 0xbb, 0xf7, 0xf3, 0xd7, 0x12, 0x12, 0xb3, 0x72, - 0x7a, 0xd1, 0x28, 0x7c, 0x5f, 0x4c, 0xdb, 0xc0, 0x4a, 0xbb, 0x5e, 0xec, 0x7d, 0x59, 0x4c, 0xdb, - 0xcb, 0xf3, 0xbe, 0x2e, 0xa6, 0xed, 0x6d, 0x39, 0xb3, 0xc3, 0xdd, 0x23, 0x63, 0x9c, 0x5d, 0xe4, - 0x35, 0xbf, 0xcd, 0x2d, 0xb8, 0x79, 0x0d, 0xca, 0xd6, 0xeb, 0xfe, 0x04, 0xb0, 0xd8, 0xe3, 0x1e, - 0xea, 0xc3, 0xea, 0xd5, 0x8b, 0xde, 0xce, 0x75, 0xb6, 0x1a, 0x51, 0xfd, 0xe9, 0x0d, 0x44, 0xd9, - 0x20, 0x64, 0xc3, 0x8d, 0x95, 0x0c, 0x1f, 0xff, 0xab, 0xf9, 0xaa, 0xaa, 0xbe, 0x73, 0x13, 0x55, - 0x36, 0xc3, 0xdc, 0x3f, 0x9d, 0xa9, 0xe0, 0x6c, 0xa6, 0x82, 0xdf, 0x33, 0x15, 0x7c, 0x9b, 0xab, - 0x85, 0xb3, 0xb9, 0x5a, 0xf8, 0x35, 0x57, 0x0b, 0x1f, 0x77, 0x3c, 0x3f, 0x1e, 0x8c, 0x6c, 0xdd, - 0xa1, 0x81, 0x91, 0x9c, 0xd8, 0x19, 0x4f, 0x3e, 0x1b, 0xcb, 0x07, 0x70, 0x99, 0x5c, 0x3c, 0x89, - 0x08, 0xb7, 0xcb, 0xe2, 0x01, 0x3c, 0xfb, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x87, 0x39, 0xe3, - 0xd5, 0x03, 0x00, 0x00, + // 662 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x4d, 0x4f, 0x13, 0x4f, + 0x18, 0xef, 0x00, 0xed, 0x9f, 0x4e, 0x09, 0xff, 0x3a, 0xa0, 0x2c, 0x8b, 0x6c, 0xd7, 0xa2, 0x49, + 0xad, 0xb4, 0x1b, 0x6a, 0x42, 0x8c, 0x24, 0x26, 0x2d, 0x14, 0xec, 0x01, 0xda, 0x6c, 0x41, 0x13, + 0x2f, 0xcd, 0x76, 0x77, 0xd8, 0x6e, 0x60, 0x77, 0x36, 0x33, 0x5b, 0x04, 0x8f, 0x9e, 0x0c, 0x27, + 0xbf, 0x00, 0x27, 0x13, 0xe3, 0x91, 0x03, 0x5f, 0xc0, 0xc4, 0x03, 0xf1, 0x44, 0x38, 0x79, 0x22, + 0xa6, 0x1c, 0xf8, 0x1a, 0x66, 0xdf, 0x80, 0xd6, 0x35, 0xe1, 0xd2, 0xf4, 0xf9, 0xbd, 0xcc, 0xf3, + 0xec, 0x6f, 0x9e, 0x0c, 0x7c, 0x68, 0x98, 0x5d, 0x45, 0xed, 0x28, 0x86, 0x25, 0x11, 0xaa, 0xa8, + 0xbb, 0x58, 0xda, 0x5b, 0x90, 0x9c, 
0xfd, 0xa2, 0x4d, 0x89, 0x43, 0xd0, 0xc4, 0x35, 0x5b, 0xf4, + 0xd9, 0xe2, 0xde, 0x02, 0x7f, 0x4f, 0x31, 0x0d, 0x8b, 0x48, 0xde, 0xaf, 0xaf, 0xe3, 0xa7, 0x54, + 0xc2, 0x4c, 0xc2, 0x24, 0x93, 0xe9, 0xae, 0xdf, 0x64, 0x7a, 0x40, 0x4c, 0xfb, 0x44, 0xcb, 0xab, + 0x24, 0xbf, 0x08, 0xa8, 0x49, 0x9d, 0xe8, 0xc4, 0xc7, 0xdd, 0x7f, 0x01, 0x2a, 0x46, 0xcd, 0x63, + 0x2b, 0x54, 0x31, 0x43, 0x5f, 0x26, 0x52, 0x41, 0x0d, 0x15, 0xfb, 0x82, 0xec, 0xd7, 0x21, 0x38, + 0xbe, 0xce, 0xf4, 0x65, 0x8a, 0x15, 0x07, 0x37, 0x5c, 0x02, 0x2d, 0xc1, 0xff, 0x54, 0xb7, 0x24, + 0x94, 0x03, 0x22, 0xc8, 0x25, 0x2b, 0x8f, 0xce, 0x4f, 0x0a, 0xb3, 0xc1, 0x38, 0x6f, 0x94, 0x5d, + 0x43, 0x73, 0xb9, 0xb2, 0xa6, 0x51, 0xcc, 0x58, 0xd3, 0xa1, 0x86, 0xa5, 0xcb, 0xa1, 0x03, 0x3d, + 0x85, 0xc9, 0x6d, 0x8c, 0x35, 0x4c, 0x5b, 0x86, 0xc6, 0x0d, 0x89, 0x20, 0x37, 0x52, 0x19, 0xeb, + 0x5d, 0x64, 0x46, 0x57, 0x3d, 0xb0, 0xb6, 0x22, 0x8f, 0xfa, 0x74, 0x4d, 0x43, 0x2f, 0x60, 0xc2, + 0x9b, 0x84, 0x71, 0xc3, 0xe2, 0x70, 0x2e, 0x55, 0x12, 0x8b, 0x11, 0x01, 0x16, 0xbd, 0x99, 0x9a, + 0xa4, 0x4b, 0x55, 0x2c, 0x07, 0x7a, 0x94, 0x81, 0xa9, 0xb6, 0xc2, 0xb0, 0xd6, 0x6a, 0xef, 0x12, + 0x75, 0x87, 0x1b, 0x71, 0xdb, 0xc8, 0xd0, 0x83, 0x2a, 0x2e, 0x82, 0x26, 0x61, 0xdc, 0x22, 0x96, + 0x8a, 0xb9, 0xb8, 0x08, 0x72, 0x71, 0xd9, 0x2f, 0xd0, 0x12, 0x8c, 0xdb, 0x1d, 0x85, 0x61, 0x2e, + 0x21, 0x82, 0xdc, 0x78, 0xe9, 0x49, 0x64, 0xbf, 0xb2, 0xae, 0x53, 0xac, 0x2b, 0x8e, 0x41, 0xac, + 0x86, 0x2b, 0x96, 0x7d, 0x4f, 0x96, 0x83, 0x0f, 0xfa, 0x73, 0x92, 0x31, 0xb3, 0x89, 0xc5, 0x70, + 0xf6, 0x3b, 0x80, 0xff, 0xaf, 0x33, 0x7d, 0xcb, 0xd6, 0x5c, 0xca, 0x4b, 0x1f, 0x2d, 0xc2, 0xa4, + 0xd2, 0x75, 0x3a, 0x84, 0x1a, 0xce, 0x41, 0x90, 0x22, 0x77, 0x7e, 0x52, 0x98, 0x0c, 0x52, 0xec, + 0x0f, 0xef, 0x46, 0x8a, 0x5e, 0xc1, 0x84, 0x7f, 0x7f, 0x5e, 0x76, 0xa9, 0xd2, 0x4c, 0x74, 0x26, + 0x9e, 0xa4, 0x92, 0x3c, 0xbd, 0xc8, 0xc4, 0xbe, 0x5d, 0x1d, 0xe7, 0x81, 0x1c, 0xb8, 0x5e, 0x2e, + 0x7e, 0xbc, 0x3a, 0xce, 0xdf, 0x9c, 0x77, 0x78, 0x75, 0x9c, 0x9f, 0xf3, 
0x7b, 0x16, 0x98, 0xb6, + 0x23, 0xed, 0x87, 0x5b, 0x30, 0x30, 0x6f, 0x76, 0x1a, 0x4e, 0x0d, 0x40, 0xe1, 0xe7, 0xe5, 0x7f, + 0x00, 0x98, 0x1e, 0x0c, 0x05, 0x55, 0xe0, 0x6c, 0x79, 0x6d, 0x4d, 0xae, 0xae, 0x95, 0x37, 0x6b, + 0xf5, 0x8d, 0x56, 0xe3, 0x75, 0xb9, 0x59, 0x6d, 0x6d, 0x6d, 0x34, 0x1b, 0xd5, 0xe5, 0xda, 0x6a, + 0xad, 0xba, 0x92, 0x8e, 0xf1, 0x99, 0xc3, 0x23, 0x71, 0x66, 0xd0, 0xb8, 0x65, 0x31, 0x1b, 0xab, + 0xc6, 0xb6, 0x81, 0x35, 0x54, 0x82, 0xf7, 0xff, 0x3e, 0xa3, 0xbe, 0x51, 0x4d, 0x03, 0x7e, 0xea, + 0xf0, 0x48, 0x9c, 0x18, 0xf4, 0xd6, 0x2d, 0x1c, 0xed, 0xd9, 0x7c, 0x5b, 0x4f, 0x0f, 0x45, 0x7b, + 0x36, 0xdf, 0x13, 0x7e, 0xe4, 0xd3, 0x17, 0x21, 0x56, 0xfa, 0x09, 0xe0, 0xf0, 0x3a, 0xd3, 0x51, + 0x0b, 0xa6, 0x6e, 0x2f, 0xfb, 0x5c, 0x64, 0xc0, 0xfd, 0x37, 0xcd, 0x3f, 0xbb, 0x83, 0x28, 0xcc, + 0x0b, 0xb5, 0xe1, 0x58, 0xdf, 0x2a, 0x3c, 0xfe, 0x97, 0xf9, 0xb6, 0x8a, 0x9f, 0xbf, 0x8b, 0x2a, + 0xec, 0x51, 0x59, 0x3d, 0xed, 0x09, 0xe0, 0xac, 0x27, 0x80, 0xdf, 0x3d, 0x01, 0x7c, 0xbe, 0x14, + 0x62, 0x67, 0x97, 0x42, 0xec, 0xd7, 0xa5, 0x10, 0x7b, 0x37, 0xaf, 0x1b, 0x4e, 0xa7, 0xdb, 0x2e, + 0xaa, 0xc4, 0x94, 0xdc, 0x13, 0x0b, 0xfb, 0x07, 0x1f, 0xa4, 0x9b, 0x47, 0xe0, 0x7a, 0x01, 0x9c, + 0x03, 0x1b, 0xb3, 0x76, 0xc2, 0x7b, 0x04, 0x9e, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x03, + 0xa1, 0xf9, 0xd9, 0x04, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
@@ -431,6 +482,11 @@ func (m *MsgCreatePrice) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.Phase != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.Phase)) + i-- + dAtA[i] = 0x30 + } if m.Nonce != 0 { i = encodeVarintTx(dAtA, i, uint64(m.Nonce)) i-- @@ -592,6 +648,9 @@ func (m *MsgCreatePrice) Size() (n int) { if m.Nonce != 0 { n += 1 + sovTx(uint64(m.Nonce)) } + if m.Phase != 0 { + n += 1 + sovTx(uint64(m.Phase)) + } return n } @@ -786,6 +845,25 @@ func (m *MsgCreatePrice) Unmarshal(dAtA []byte) error { break } } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + m.Phase = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Phase |= AggregationPhase(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTx(dAtA[iNdEx:]) diff --git a/x/oracle/types/types.go b/x/oracle/types/types.go index 4383aeb67..0392fc0a1 100644 --- a/x/oracle/types/types.go +++ b/x/oracle/types/types.go @@ -1,12 +1,66 @@ package types import ( + "bytes" "encoding/binary" sdkmath "cosmossdk.io/math" sdk "github.com/cosmos/cosmos-sdk/types" ) +type PieceWithProof struct { + Index uint32 + RawData []byte + // Proof []*HashNode + Proof Proof + // reference to the tx including this piece + Tx sdk.Tx +} + +func (p *PieceWithProof) ProofSize() uint32 { + return uint32(len(p.Proof)) +} + +func (p *PieceWithProof) HasIndexOnProofPath(index uint32) bool { + for _, pn := range p.Proof { + if index == pn.Index { + return true + } + } + return false +} + +func (p *PieceWithProof) EqualsTo(p2 *PieceWithProof) bool { + if p.Index != p2.Index { + return false + } + if !bytes.Equal(p.RawData, p2.RawData) { + return false + } + if len(p.Proof) != len(p2.Proof) { + return false + } + + // we require these to be exactly the same(same order) which is identical with 
anteHandler proofPath check + for i, pn := range p.Proof { + if pn.Index != p2.Proof[i].Index { + return false + } + if !bytes.Equal(pn.Hash, p2.Proof[i].Hash) { + return false + } + } + return true +} + +// MsgCreatePriceRawData defined as alias of MsgCreatePrice with rawData related method to get rid of redundant checking wehter the MsgCreatePrice is with a valid RawData message +// TODO: add filed 'parsed' into this struct to avoid redundant parse +// type MsgCreatePriceRawData MsgCreatePrice +type MsgCreatePriceRawData struct { + *MsgCreatePrice + Piece *PieceWithProof +} + type OracleInfo struct { Chain struct { Name string @@ -64,11 +118,15 @@ const ( SourceChainlinkName = "Chainlink" SourceChainlinkID = 1 - TimeLayout = "2006-01-02 15:04:05" + + RuleIDAll = 1 + TimeLayout = "2006-01-02 15:04:05" DelimiterForCombinedKey = byte('/') NilDetID = "" + + DelimiterForBase64 = "|" ) var ( @@ -92,6 +150,20 @@ func Uint64Bytes(value uint64) []byte { return valueBytes } +func BytesToUint64(bz []byte) uint64 { + return binary.BigEndian.Uint64(bz) +} + +func Uint32Bytes(value uint32) []byte { + valueBytes := make([]byte, 4) + binary.BigEndian.PutUint32(valueBytes, value) + return valueBytes +} + +func BytesToUint32(bz []byte) uint32 { + return binary.BigEndian.Uint32(bz) +} + func ConsAddrStrFromCreator(creator string) (string, error) { accAddress, err := sdk.AccAddressFromBech32(creator) if err != nil { From acfabb21316ad305122c1ca579f774c9ee692ef5 Mon Sep 17 00:00:00 2001 From: leonz789 Date: Fri, 28 Feb 2025 01:59:26 +0800 Subject: [PATCH 2/2] update components update nst_post_handler, use balance to replace balance-change remove nst balance change cap to 'unlimit'(uint64) balance recovery for 2nd phase data --- local_node.sh | 4 +- proto/imuachain/oracle/v1/rawdata_nst.proto | 5 +- proto/imuachain/oracle/v1/two_phases.proto | 33 + x/oracle/keeper/common/expected_keepers.go | 12 + x/oracle/keeper/common/two_phases.go | 3 +- x/oracle/keeper/feedermanagement/caches.go | 
3 +- .../keeper/feedermanagement/feedermanager.go | 140 +- .../feedermanagement/feedermanager_test.go | 4 +- .../keeper/feedermanagement/helper_test.go | 4 +- x/oracle/keeper/feedermanagement/round.go | 13 +- x/oracle/keeper/feedermanagement/types.go | 16 +- x/oracle/keeper/msg_server_price_feed.go | 8 +- x/oracle/keeper/nst_post_aggregation.go | 174 +-- x/oracle/keeper/post_aggregation.go | 15 +- x/oracle/keeper/prices.go | 46 +- x/oracle/keeper/two_phases.go | 218 +++- x/oracle/types/key_two_phase.go | 54 +- x/oracle/types/merkletree.go | 37 +- x/oracle/types/message_create_price.go | 4 + x/oracle/types/params.go | 17 +- x/oracle/types/rawdata_nst.pb.go | 84 +- x/oracle/types/two_phases.pb.go | 1145 +++++++++++++++++ 22 files changed, 1761 insertions(+), 278 deletions(-) create mode 100644 proto/imuachain/oracle/v1/two_phases.proto create mode 100644 x/oracle/types/two_phases.pb.go diff --git a/local_node.sh b/local_node.sh index 1509cef9a..fe12efb31 100755 --- a/local_node.sh +++ b/local_node.sh @@ -244,8 +244,8 @@ imua: grpc: 127.0.0.1:9090 ws: !!str ws://127.0.0.1:26657/websocket rpc: !!str http://127.0.0.1:26657 -debugger: - grpc: !!str :50051 +#debugger: +# grpc: !!str :50051 EOF ) diff --git a/proto/imuachain/oracle/v1/rawdata_nst.proto b/proto/imuachain/oracle/v1/rawdata_nst.proto index 0fa60583e..f072224c3 100644 --- a/proto/imuachain/oracle/v1/rawdata_nst.proto +++ b/proto/imuachain/oracle/v1/rawdata_nst.proto @@ -9,11 +9,12 @@ message NSTKV { // staker index for a nst defined on imuachain side uint32 staker_index = 1; // balance change since last update - int64 balance_change = 2; + int64 balance = 2; } // RawDataNST represents balance changes of all stakers for a NST message RawDataNST { + uint64 version = 1; // NSTKV use array to describe {staker_indx: balance_change} for all stakers whose balance had changed - repeated NSTKV nst_balance_changes = 1; + repeated NSTKV nst_balance_changes = 2; } diff --git a/proto/imuachain/oracle/v1/two_phases.proto 
b/proto/imuachain/oracle/v1/two_phases.proto new file mode 100644 index 000000000..63b51a15c --- /dev/null +++ b/proto/imuachain/oracle/v1/two_phases.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; +package imuachain.oracle.v1; + +option go_package = "github.com/imua-xyz/imuachain/x/oracle/types"; + +// Nonce is a message that contains a nonce for a feeder +message ValidatorIndex { + // FeederID is the ID of the feeder that corresponding to the nonce + string validator = 1; + // value is the nonce value + uint32 next_index = 2; +} + +// ValidatorNonce is a message that contains the nonces for a validator +message FeederValidatorsIndex{ + // nonces is the list of nonces for the feeders + repeated ValidatorIndex validator_index_list= 2; +} + +message HashNode { + uint32 index = 1; + bytes hash = 2; +} + +// Proof represents all hash nodes of a Mekle tree with indexes +message FlattenTree{ + repeated HashNode nodes = 1; +} + +message TreeInfo { + uint32 leaf_count = 1; + bytes root_hash = 2; +} diff --git a/x/oracle/keeper/common/expected_keepers.go b/x/oracle/keeper/common/expected_keepers.go index 0e83e37ed..15c45c6d4 100644 --- a/x/oracle/keeper/common/expected_keepers.go +++ b/x/oracle/keeper/common/expected_keepers.go @@ -6,6 +6,7 @@ import ( sdkmath "cosmossdk.io/math" abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/libs/log" + "github.com/cosmos/cosmos-sdk/codec" sdk "github.com/cosmos/cosmos-sdk/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" dogfoodkeeper "github.com/imua-xyz/imuachain/x/dogfood/keeper" @@ -71,6 +72,17 @@ type KeeperOracle interface { SetNonce(ctx sdk.Context, nonce types.ValidatorNonce) GetSpecifiedAssetsPrice(ctx sdk.Context, assetID string) (types.Price, error) GetMultipleAssetsPrices(ctx sdk.Context, assetIDs map[string]interface{}) (map[string]types.Price, error) + + Setup2ndPhase(ctx sdk.Context, feederID uint64, validators []string, leafCount uint32, rootHash []byte) + Clear2ndPhase(ctx 
sdk.Context, feederID uint64, rootIndex uint32) + AddNodesToMerkleTree(ctx sdk.Context, feederID uint64, proof []*types.HashNode) + SetNextPieceIndexForFeeder(ctx sdk.Context, feederID uint64, pieceIndex uint32) + GetPostAggregation(feederID int64) (handler PostAggregationHandler, found bool) + SetRawDataPiece(ctx sdk.Context, feederID uint64, pieceIndex uint32, rawData []byte) + GetRawDataPieces(ctx sdk.Context, feederID uint64) ([][]byte, error) + GetFeederTreeInfo(ctx sdk.Context, feederID uint64) (uint32, []byte) + GetNodesFromMerkleTree(ctx sdk.Context, feederID uint64) []*types.HashNode + MustUnmarshal(bz []byte, ptr codec.ProtoMarshaler) } var _ KeeperDogfood = dogfoodkeeper.Keeper{} diff --git a/x/oracle/keeper/common/two_phases.go b/x/oracle/keeper/common/two_phases.go index 60a2f9ff8..167aa2b9a 100644 --- a/x/oracle/keeper/common/two_phases.go +++ b/x/oracle/keeper/common/two_phases.go @@ -5,4 +5,5 @@ import ( ) // the input data could be either rawData bytes of data with big size for non-price senarios or 'price' info -type PostAggregationHandler func(data []byte, ctx sdk.Context, k KeeperOracle) error +// type PostAggregationHandler func(data []byte, ctx sdk.Context, k KeeperOracle) error +type PostAggregationHandler func(ctx sdk.Context, data []byte, feederID, roundID uint64, k KeeperOracle) error diff --git a/x/oracle/keeper/feedermanagement/caches.go b/x/oracle/keeper/feedermanagement/caches.go index f4d1e73c2..5990fa324 100644 --- a/x/oracle/keeper/feedermanagement/caches.go +++ b/x/oracle/keeper/feedermanagement/caches.go @@ -9,7 +9,6 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/ethereum/go-ethereum/common" - "github.com/imua-xyz/imuachain/x/oracle/types" oracletypes "github.com/imua-xyz/imuachain/x/oracle/types" ) @@ -127,7 +126,7 @@ func (c *caches) IsRule2PhasesByFeederID(feederID uint64) bool { return c.isRule2PhasesByRule(rule) } -func (c *caches) isRule2PhasesByRule(rule *types.RuleSource) bool { +func (c *caches) 
isRule2PhasesByRule(rule *oracletypes.RuleSource) bool { // just check the format and don't care the verification here, the verification should be done by 'params' not in this memory calculator(feedermanager) if len(rule.SourceIDs) == 1 && rule.SourceIDs[0] == 0 && rule.Nom != nil && len(rule.Nom.SourceIDs) == 1 { diff --git a/x/oracle/keeper/feedermanagement/feedermanager.go b/x/oracle/keeper/feedermanagement/feedermanager.go index 672ca142f..cdd728af9 100644 --- a/x/oracle/keeper/feedermanagement/feedermanager.go +++ b/x/oracle/keeper/feedermanagement/feedermanager.go @@ -65,8 +65,7 @@ func (f *FeederManager) BeginBlock(ctx sdk.Context) (recovered bool) { // if the cache is nil and we are not in recovery mode, init the caches if f.cs == nil { var err error - recovered, err = f.recovery(ctx) - // it's safe to panic since this will only happen when the node is starting with something wrong in the store + recovered, err = f.recovery(ctx) // it's safe to panic since this will only happen when the node is starting with something wrong in the store if err != nil { panic(err) } @@ -284,19 +283,57 @@ func (f *FeederManager) commitRounds(ctx sdk.Context) { // #nosec G115 // tokenID is index of slice if updated := f.k.AppendPriceTR(ctx, uint64(r.tokenID), *priceCommit, finalPrice.DetID); !updated { - // failed to append price due to roundID gap, and this is a 'should-not-happen' case - f.k.GrowRoundID(ctx, uint64(r.tokenID), uint64(r.roundID)) + // this is an 'impossible' case, we should not reach here + latestPrice, latestRoundID := f.k.GrowRoundID(ctx, uint64(r.tokenID), uint64(r.roundID)) + logger.Error("failed to append price due to roundID gap and update this round with GrowRoundID", "feederID", r.feederID, "try-to-update-roundID", r.roundID, "try-to-update-price", priceCommit, "restul-latestPrice", latestPrice, "result-latestRoundID", latestRoundID) + } else { + fstr := strconv.FormatInt(feederID, 10) + successFeederIDs = append(successFeederIDs, fstr) + + // set up 
for 2-phases aggregation + if r.twoPhases { + // no more validation check, they've all been done by previous process + lc, _ := strconv.ParseUint(finalPrice.DetID, 10, 32) + // set up mem-round for 2nd phase aggregation + r.m = oracletypes.NewMT(f.cs.RawDataPieceSize(), uint32(lc), []byte(finalPrice.Price)) + // set up state for 2nd phase aggregation + // #nosec G115 + f.k.Setup2ndPhase(ctx, uint64(r.feederID), f.cs.GetValidators(), uint32(lc), []byte(finalPrice.Price)) + } } - - fstr := strconv.FormatInt(feederID, 10) - successFeederIDs = append(successFeederIDs, fstr) // there's no valid price for any round yet } else { logger.Error("We currently only support rules under oracle V1: only allow price from source Chainlink", "feederID", r.feederID) } } // keep aggregator for possible 'handlQuotingMisBehavior' at quotingWindowEnd r.status = roundStatusClosed + } else if r.twoPhases { + // check if r is 2-phases and rawData is completed, for 2nd-phase, the status of round must be closed + if r.m.CollectingRawData() { + if len(r.cachedProofForBlock) > 0 { + // #nosec G115 + f.k.AddNodesToMerkleTree(ctx, uint64(r.feederID), r.cachedProofForBlock) + // reset cachedProofForBlock after commit to sate + r.cachedProofForBlock = nil + } + if LatestLeafIndex, ok := r.m.LatestLeafIndex(); ok { + // #nosec G115 + f.k.SetNextPieceIndexForFeeder(ctx, uint64(r.feederID), LatestLeafIndex+1) + } + } else if rawData, ok := r.m.CompleteRawData(); ok { + // execute postHandler with rawData + if err := r.h(ctx, rawData, uint64(r.feederID), uint64(r.roundID), f.k); err != nil { + // just log the error and wait for next round to update + // TODO(leonz): this suites for NST, we can just wait for next round to update, but does it suites for commmon case ? should we do some other postHandling for this fail when it's not of NST case? 
+ logger.Error("failed to execute postHandler for 2phases aggregation on consensus price", "feederID", r.feederID, "roundID", r.roundID, "consensus 1st-phase hash:%s", hex.EncodeToString(r.m.RootHash())) + } + // reset related cache from state + // #nosec G115 + f.k.Clear2ndPhase(ctx, uint64(r.feederID), r.m.RootIndex()) + r.m = nil + } } + // close all quotingWindow to skip current rounds' 'handlQuotingMisBehavior' if f.forceSeal { r.closeQuotingWindow() @@ -532,7 +569,8 @@ func (f *FeederManager) updateRoundsParamsAndAddNewRounds(ctx sdk.Context) { logger.Info("[mem] add new round", "feederID", feederID, "height", height) f.sortedFeederIDs = append(f.sortedFeederIDs, feederID) twoPhases := f.cs.IsRule2PhasesByFeederID(uint64(feederID)) - f.rounds[feederID] = newRound(feederID, tokenFeeder, int64(params.MaxNonce), f.cs, NewAggMedian(), twoPhases) + ph, _ := f.k.GetPostAggregation(feederID) + f.rounds[feederID] = newRound(feederID, tokenFeeder, int64(params.MaxNonce), f.cs, NewAggMedian(), twoPhases, ph) } } f.sortedFeederIDs.sort() @@ -663,6 +701,9 @@ func (f *FeederManager) ValidateMsg(msg *oracletypes.MsgCreatePrice) error { } } + if f.cs.IsRule2PhasesByFeederID(msg.FeederID) && msg.IsNotTwoPhases() { + return fmt.Errorf("feederID:%d is configured for 2-phases aggregation, but the message is not of 2-phases", msg.FeederID) + } // extra check for message as 1st phase for 2-phases aggregation if msg.IsPhaseOne() { if len(msg.Prices) != 1 { @@ -687,10 +728,11 @@ func (f *FeederManager) ValidateMsg(msg *oracletypes.MsgCreatePrice) error { } // we wait one more maxNonce blocks to make sure proposer getting expected txs in their mempool + // we don't use the last block of current round(which is the baseBlock of the next round), so the quotingWindow for 2nd-phase message is from [baseBlock+2*maxNonce, nextBaseBlock-1] // #nosec G115 // maxNonce is positive windowForPhaseTwo := f.cs.IntervalForFeederID(msg.FeederID) - uint64(f.cs.GetMaxNonce())*2 - if leafCount > 
windowForPhaseTwo { - return fmt.Errorf("2-phases aggregation for feederID:%d, should have detID less than or equal to %d", msg.FeederID, windowForPhaseTwo) + if leafCount < 1 || leafCount > windowForPhaseTwo { + return fmt.Errorf("2-phases aggregation for feederID:%d, should have detID less than or equal to %d and be at least 1, got%d", msg.FeederID, windowForPhaseTwo, leafCount) } } return nil @@ -713,6 +755,26 @@ func (f *FeederManager) ProcessQuote(ctx sdk.Context, msg *oracletypes.MsgCreate return nil, fmt.Errorf("round not exists for feederID:%d, proposer:%s", msgItem.FeederID, msgItem.Validator) } + // TODO(leonz): remove this ? + if !r.twoPhases != msg.IsNotTwoPhases() { + // this should not happen, since message itself had been checked in 'validateMsg', when came to here it means there' something wront in 'round' initialization + return nil, fmt.Errorf("the 2phases status of round and message is mismatched, there's got something wrong with mem-round initialzation, feederID:%d", msg.FeederID) + } + + if msg.IsPhaseTwo() { + // either there's no consensus price from 1st phase or the 2nd phase had collected all pieces, this condition will be true and we will reject the transaction + // also we don't record any 'miss' count under this same condition + if r.m == nil || r.m.Completed() { + return nil, fmt.Errorf("message with 2-nd phase for feederID:%d of round_%d is reject since that round is not collecting raw data", msg.FeederID, r.roundID) + } + + // #nosec G115 + if uint64(ctx.BlockHeight()) < r.roundPhaseTwoStartBlock { + return nil, fmt.Errorf("message with 2-nd phase for feederID:%d of round_%d can only be accept at block height of at least %d", msg.FeederID, r.roundID, r.roundPhaseTwoStartBlock) + } + + } + // #nosec G115 // baseBlock is block height which is not negative if valid := r.ValidQuotingBaseBlock(int64(msg.BasedBlock)); !valid { return nil, fmt.Errorf("failed to process price-feed msg for feederID:%d, round is quoting:%t,quotingWindow is 
open:%t, expected baseBlock:%d, got baseBlock:%d", msgItem.FeederID, r.IsQuoting(), r.IsQuotingWindowOpen(), r.roundBaseBlock, msg.BasedBlock) @@ -839,8 +901,10 @@ func (f *FeederManager) recovery(ctx sdk.Context) (bool, error) { continue } tfID := int64(tfID) + // #nosec G115 // safe conversion twoPhases := f.cs.IsRule2PhasesByFeederID(uint64(tfID)) - f.rounds[tfID] = newRound(tfID, tf, int64(params.MaxNonce), f.cs, NewAggMedian(), twoPhases) + postHandler, _ := f.k.GetPostAggregation(tfID) + f.rounds[tfID] = newRound(tfID, tf, int64(params.MaxNonce), f.cs, NewAggMedian(), twoPhases, postHandler) f.sortedFeederIDs.add(tfID) } f.prepareRounds(ctxReplay) @@ -878,10 +942,36 @@ func (f *FeederManager) recovery(ctx sdk.Context) (bool, error) { f.cs.SkipCommit() + pieceSize := f.cs.RawDataPieceSize() + // recovery for 2nd-phase state + for _, r := range f.rounds { + if r.twoPhases { + //reset r.m from state + // #nosec G115 + feederID := uint64(r.feederID) + leafCount, rootHash := f.k.GetFeederTreeInfo(ctx, uint64(r.feederID)) + if leafCount == 0 { + continue + } + r.m = oracletypes.NewMT(pieceSize, leafCount, rootHash) + // rawdata + rawDataPieces, err := f.k.GetRawDataPieces(ctx, feederID) + if err != nil { + return false, err + } + r.m.SetRawDataPieces(rawDataPieces) + // proof nodes + // #nosec G115 + nodes := f.k.GetNodesFromMerkleTree(ctx, uint64(r.feederID)) + r.m.SetProofNodes(nodes) + } + } + return true, nil } func (f *FeederManager) RoundIDToBaseBlock(feederID, roundID uint64) (uint64, bool) { + // #nosec G115 r, ok := f.rounds[int64(feederID)] if !ok { return 0, false @@ -891,7 +981,7 @@ func (f *FeederManager) RoundIDToBaseBlock(feederID, roundID uint64) (uint64, bo // BaseBlockToRoundID returns the roundID which the input baseblock indicates to, it is different to the roundID of which this baseBlock BelongsTo (+1) func (f *FeederManager) BaseBlockToNextRoundID(feederID, baseBlock uint64) (uint64, bool) { - //TODO(leonz): use uint64 as f.rounds key + // 
TODO(leonz): use uint64 as f.rounds key // #nosec G115 r, ok := f.rounds[int64(feederID)] if !ok { @@ -996,34 +1086,42 @@ func getProtoMsgItemFromQuote(msg *oracletypes.MsgCreatePrice) *oracletypes.MsgI } } -func (f *FeederManager) ProcessRawData(msg *oracletypes.MsgCreatePrice) error { +// ProcessRawData verify the submitted piece of rawData with proof against the expected root and cached the result if it passded the verification +// return (cached rawData piece, error) +func (f *FeederManager) ProcessRawData(msg *oracletypes.MsgCreatePrice) ([]byte, error) { + if err := f.ValidateMsg(msg); err != nil { + return nil, oracletypes.ErrInvalidMsg.Wrap(err.Error()) + } piece, ok := f.GetPieceWithProof(msg) if !ok { - return errors.New("failed to parse rawdata piece from message") + return nil, errors.New("failed to parse rawdata piece from message") } // #nosec G115 r, ok := f.rounds[int64(msg.FeederID)] if !ok { // this should not happen - return fmt.Errorf("round for feederID:%d not exists", msg.FeederID) + return nil, fmt.Errorf("round for feederID:%d not exists", msg.FeederID) } - if r.m != nil { - return fmt.Errorf("feederID %d is not collecting rawData", msg.FeederID) + if r.m == nil { + return nil, fmt.Errorf("feederID %d is not collecting rawData", msg.FeederID) } // we don't check the 1st return value to see if this input proof is of the minimal, that's the duty of anteHandler, and 'verified' pieceWithProof will not fail the tx execution - _, ok = r.m.VerifyAndCache(piece.Index, piece.RawData, piece.Proof) + cachedProof, ok := r.m.VerifyAndCache(piece.Index, piece.RawData, piece.Proof) if !ok { - return fmt.Errorf("failed to verify piece of index %d provided within message for feederID:%d against root:%s", piece.Index, msg.FeederID, hex.EncodeToString(r.m.RootHash())) + return nil, fmt.Errorf("failed to verify piece of index %d provided within message for feederID:%d against root:%s", piece.Index, msg.FeederID, hex.EncodeToString(r.m.RootHash())) + } + // we 
don't need to cache the proof for state updating if the merkle tree have collected all rawData + if !r.m.Completed() { + r.cachedProofForBlock = append(r.cachedProofForBlock, cachedProof...) } // we don't do no state update in tx exexuting, the postHandler and all state update will be handled in EndBlock // // post handle rawData registered for the feederID // // clear all caching pieces from stateDB - // // // remove/reset merkleTree // // remove merkleTree // persist piece for recovery (with memory-cache update into merkleTree) // save this piece and proof to db for recovery, for nodes without running, // this process only causes additional: two write to stateDB(piece, proof), one read from the stateDB(piece) - return nil + return piece.RawData, nil } diff --git a/x/oracle/keeper/feedermanagement/feedermanager_test.go b/x/oracle/keeper/feedermanagement/feedermanager_test.go index 954c7f22b..c1c80cdcc 100644 --- a/x/oracle/keeper/feedermanagement/feedermanager_test.go +++ b/x/oracle/keeper/feedermanagement/feedermanager_test.go @@ -26,14 +26,14 @@ func TestFeederManagement(t *testing.T) { ps2 := ps1 fm2 := *fm - fm.rounds[1] = newRound(1, testdata.DefaultParamsForTest().TokenFeeders[1], 3, c, defaultAggMedian) + fm.rounds[1] = newRound(1, testdata.DefaultParamsForTest().TokenFeeders[1], 3, c, defaultAggMedian, false, nil) fm.rounds[1].PrepareForNextBlock(20) fm.sortedFeederIDs.add(1) fm.rounds[1].a.ds.AddPriceSource(&ps1, big.NewInt(1), "v1") fm2.rounds = make(map[int64]*round) fm2.sortedFeederIDs = make([]int64, 0) - fm2.rounds[1] = newRound(1, testdata.DefaultParamsForTest().TokenFeeders[1], 3, c, defaultAggMedian) + fm2.rounds[1] = newRound(1, testdata.DefaultParamsForTest().TokenFeeders[1], 3, c, defaultAggMedian, false, nil) fm2.rounds[1].PrepareForNextBlock(20) fm2.sortedFeederIDs.add(1) fm2.rounds[1].a.ds.AddPriceSource(&ps2, big.NewInt(1), "v1") diff --git a/x/oracle/keeper/feedermanagement/helper_test.go b/x/oracle/keeper/feedermanagement/helper_test.go 
index 4f70b4611..d30112632 100644 --- a/x/oracle/keeper/feedermanagement/helper_test.go +++ b/x/oracle/keeper/feedermanagement/helper_test.go @@ -98,12 +98,12 @@ func (t *Test) NewAggregator(filled bool) *aggregator { func (t *Test) NewRound(cs CacheReader) *round { feederID := r.Intn(len(params.TokenFeeders)-1) + 1 - round := newRound(int64(feederID), params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian, false) + round := newRound(int64(feederID), params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian, false, nil) return round } func (t *Test) NewRoundWithFeederID(cs CacheReader, feederID int64) *round { - round := newRound(feederID, params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian, false) + round := newRound(feederID, params.TokenFeeders[feederID], int64(params.MaxNonce), cs, defaultAggMedian, false, nil) return round } diff --git a/x/oracle/keeper/feedermanagement/round.go b/x/oracle/keeper/feedermanagement/round.go index d2b5384ea..311b149e4 100644 --- a/x/oracle/keeper/feedermanagement/round.go +++ b/x/oracle/keeper/feedermanagement/round.go @@ -3,10 +3,11 @@ package feedermanagement import ( "fmt" + "github.com/imua-xyz/imuachain/x/oracle/keeper/common" oracletypes "github.com/imua-xyz/imuachain/x/oracle/types" ) -func newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowSize int64, cache CacheReader, algo AggAlgorithm, twoPhases bool) *round { +func newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowSize int64, cache CacheReader, algo AggAlgorithm, twoPhases bool, postHandler common.PostAggregationHandler) *round { ret := &round{ // #nosec G115 startBaseBlock: int64(tokenFeeder.StartBaseBlock), @@ -21,7 +22,6 @@ func newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowS // #nosec G115 tokenID: int64(tokenFeeder.TokenID), cache: cache, - // default value status: roundStatusClosed, a: nil, @@ -31,7 +31,9 @@ func 
newRound(feederID int64, tokenFeeder *oracletypes.TokenFeeder, quoteWindowS twoPhases: twoPhases, } if twoPhases { - ret.rawData = make([][]byte, 0) + if postHandler != nil { + ret.h = postHandler + } } return ret } @@ -173,6 +175,11 @@ func (r *round) PrepareForNextBlock(currentHeight int64) (open bool) { r.openQuotingWindow() open = true } + if r.twoPhases { + // wait quoteWindowSize-1 blocks for proposer to collecting pieces + // #nosec G115 + r.roundPhaseTwoStartBlock = uint64(r.roundBaseBlock + 2*r.quoteWindowSize) + } } return open } diff --git a/x/oracle/keeper/feedermanagement/types.go b/x/oracle/keeper/feedermanagement/types.go index 0c9b992b8..9ddc374f1 100644 --- a/x/oracle/keeper/feedermanagement/types.go +++ b/x/oracle/keeper/feedermanagement/types.go @@ -6,6 +6,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/imua-xyz/imuachain/x/oracle/keeper/common" + "github.com/imua-xyz/imuachain/x/oracle/types" oracletypes "github.com/imua-xyz/imuachain/x/oracle/types" ) @@ -211,6 +212,12 @@ type round struct { // roundBaseBlock is the round base block of current round roundBaseBlock int64 + + // roundPhaseTwoStartBlock defines the first block when the oracle begins accepting second-phase messages containing raw data pieces during two-phase aggregation + // We delay collecting raw data pieces for several blocks after first-phase consensus to give proposers time to receive and prepare messages with raw data pieces and proofs + // Since proposers are penalized for not including necessary raw data pieces, we provide this buffer to prevent unfair punishment due to overly strict timeouts + roundPhaseTwoStartBlock uint64 + // roundID is the round ID of current round roundID int64 // status indicates the status of current round @@ -224,13 +231,12 @@ type round struct { // twoPhases indicates if the corresponding tokenfeeder requires 2-phase aggregation twoPhases bool - // rawData is original data for tokenFeeder with 2-phases aggregation rule - // a 
validator can provide more than one rawData for one round - rawData [][]byte - // in 2-phases aggregation, the aggregated price is the hash root of pieces of rawData, when we received every piece to recover the whole original rawData, this flag is set to true - rawDataSealed bool m *oracletypes.MerkleTree + // cachedProofForBlock keeps added proof cache from current block, used for EndBlock to update state + // we don't do any state update during oracle tx executing, so we cached the information before endBlock if any + // this will be reset on endBlock after update state + cachedProofForBlock types.Proof h common.PostAggregationHandler } diff --git a/x/oracle/keeper/msg_server_price_feed.go b/x/oracle/keeper/msg_server_price_feed.go index 69c94a20d..db48f05df 100644 --- a/x/oracle/keeper/msg_server_price_feed.go +++ b/x/oracle/keeper/msg_server_price_feed.go @@ -4,6 +4,7 @@ import ( "context" "crypto/sha256" "encoding/base64" + "encoding/hex" "strconv" "strings" "time" @@ -32,7 +33,7 @@ func (ms msgServer) CreatePrice(goCtx context.Context, msg *types.MsgCreatePrice logger := ms.Logger(ctx) validator, _ := types.ConsAddrStrFromCreator(msg.Creator) - logQuote := []interface{}{"feederID", msg.FeederID, "baseBlock", msg.BasedBlock, "proposer", validator, "msg-nonce", msg.Nonce, "height", ctx.BlockHeight()} + logQuote := []any{"feederID", msg.FeederID, "baseBlock", msg.BasedBlock, "proposer", validator, "msg-nonce", msg.Nonce, "height", ctx.BlockHeight()} if err := checkTimestamp(ctx, msg); err != nil { logger.Error("quote has invalid timestamp", append(logQuote, "error", err)...) 
@@ -41,8 +42,9 @@ func (ms msgServer) CreatePrice(goCtx context.Context, msg *types.MsgCreatePrice // goto rawData process which needs no 'aggragation', we just verify the provided piece with recoreded root which got consensus if msg.IsPhaseTwo() { - err := ms.ProcessRawData(msg) - if err != nil { + cachedRawData, err := ms.ProcessRawData(msg) + if err == nil { + logger.Info("quote of 2nd-phase added rawData of hash:%s", hex.EncodeToString(cachedRawData)) return &types.MsgCreatePriceResponse{}, nil } logger.Error("quote of 2nd-phase for rawData failed", append(logQuote, "error", err)) diff --git a/x/oracle/keeper/nst_post_aggregation.go b/x/oracle/keeper/nst_post_aggregation.go index 9660297b4..553c105cc 100644 --- a/x/oracle/keeper/nst_post_aggregation.go +++ b/x/oracle/keeper/nst_post_aggregation.go @@ -25,40 +25,6 @@ import ( // undelegate: update operator's price, operator's totalAmount, operator's totalShare, staker's share // msg(refund or slash on beaconChain): update staker's price, operator's price -type NSTAssetID string - -const ( - // NSTETHAssetAddr = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee" - // TODO: we currently support NSTETH only which has capped effective balance for one validator - // TODO: this is a bad practice, and for Lz, they have different version of endpoint with different chainID - // Do the validation before invoke oracle related functions instead of check these hard code ids here. 
- ETHMainnetChainID = "0x7595" - ETHLocalnetChainID = "0x65" - ETHHoleskyChainID = "0x9d19" - ETHSepoliaChainID = "0x9ce1" - - NSTETHAssetIDMainnet NSTAssetID = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee_0x7595" - NSTETHAssetIDLocalnet NSTAssetID = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee_0x65" - NSTETHAssetIDHolesky NSTAssetID = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee_0x9d19" - NSTETHAssetIDSepolia NSTAssetID = "0xeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee_0x9ce1" -) - -var ( - limitedChangeNST = map[NSTAssetID]bool{ - NSTETHAssetIDMainnet: true, - NSTETHAssetIDLocalnet: true, - NSTETHAssetIDHolesky: true, - NSTETHAssetIDSepolia: true, - } - - maxEffectiveBalances = map[NSTAssetID]int{ - NSTETHAssetIDMainnet: 32, - NSTETHAssetIDLocalnet: 32, - NSTETHAssetIDHolesky: 32, - NSTETHAssetIDSepolia: 32, - } -) - // SetStakerInfos set stakerInfos for the specific assetID func (k Keeper) SetStakerInfos(ctx sdk.Context, assetID string, stakerInfos []*types.StakerInfo) { store := ctx.KVStore(k.storeKey) @@ -176,9 +142,6 @@ func (k Keeper) GetAllStakerListAssets(ctx sdk.Context) (ret []types.StakerListA } func (k Keeper) UpdateNSTValidatorListForStaker(ctx sdk.Context, assetID, stakerAddr, validatorPubkey string, amount sdkmath.Int) error { - if !IsLimitedChangeNST(assetID) { - return types.ErrNSTAssetNotSupported - } _, decimalInt, err := k.getDecimal(ctx, assetID) if err != nil { return err @@ -287,78 +250,6 @@ func (k Keeper) UpdateNSTValidatorListForStaker(ctx sdk.Context, assetID, staker return nil } -// UpdateNSTByBalanceChange updates balance info for staker under native-restaking asset of assetID when its balance changed by slash/refund on the source chain (beacon chain for eth) -func (k Keeper) UpdateNSTByBalanceChange(ctx sdk.Context, assetID string, price types.PriceTimeRound, version int64) error { - if !IsLimitedChangeNST(assetID) { - return types.ErrNSTAssetNotSupported - } - if version != k.GetNSTVersion(ctx, assetID) { - return errors.New("version not 
match") - } - _, chainID, _ := assetstypes.ParseID(assetID) - rawData := []byte(price.Price) - if len(rawData) < 32 { - return errors.New("length of indicate maps for stakers should be exactly 32 bytes") - } - sl := k.GetStakerList(ctx, assetID) - if len(sl.StakerAddrs) == 0 { - return errors.New("staker list is empty") - } - stakerChanges, err := parseBalanceChangeCapped(rawData, sl) - if err != nil { - return fmt.Errorf("failed to parse balance changes: %w", err) - } - store := ctx.KVStore(k.storeKey) - for _, stakerAddr := range sl.StakerAddrs { - // if stakerAddr is not in stakerChanges, then the change would be set to 0 which is expected - change := stakerChanges[stakerAddr] - key := types.NativeTokenStakerKey(assetID, stakerAddr) - value := store.Get(key) - if value == nil { - return errors.New("stakerInfo does not exist") - } - stakerInfo := &types.StakerInfo{} - k.cdc.MustUnmarshal(value, stakerInfo) - newBalance := types.BalanceInfo{} - if length := len(stakerInfo.BalanceList); length > 0 { - newBalance = *(stakerInfo.BalanceList[length-1]) - } - newBalance.Block = uint64(ctx.BlockHeight()) - // we set index as a global reference used through all rounds - newBalance.Index++ - newBalance.Change = types.Action_ACTION_SLASH_REFUND - newBalance.RoundID = price.RoundID - // balance update are based on initial/max effective balance: 32 - maxBalance := maxEffectiveBalance(assetID) * (len(stakerInfo.ValidatorPubkeyList)) - balance := maxBalance + change - // there's one case that this delta might be more than previous Balance - // staker's validatorlist: {v1, v2, v3, v5} - // in one same block: withdraw v2, v3, v5, balance of v2, v3, v5 all be slashed by -16 - // => amount: 32*4->32(by withdraw), the validatorList of feeder will be updated on next block, so it will report the balance change of v5: -16 as in the staker's balance change, result to: 32*4->32-> 32-16*3 = -16 - // we will just ignore this misbehavior introduced by synchronize-issue, and this will be 
correct in next block/round - if balance > maxBalance || balance < 0 { - // balance should not be able to be reduced to 0 by balance change - return errors.New("effective balance should never exceeds 32 for one validator and should be positive") - } - - if delta := int64(balance) - newBalance.Balance; delta != 0 { - decimal, _, err := k.getDecimal(ctx, assetID) - if err != nil { - return err - } - if err := k.delegationKeeper.UpdateNSTBalance(ctx, getStakerID(stakerAddr, chainID), assetID, sdkmath.NewIntWithDecimal(delta, decimal)); err != nil { - return err - } - newBalance.Balance = int64(balance) - } - // newBalance.Balance += int64(change) - stakerInfo.Append(&newBalance) - bz := k.cdc.MustMarshal(stakerInfo) - store.Set(key, bz) - } - return nil -} - // IncreaseNSTVersion increases the version of native token for assetID func (k Keeper) IncreaseNSTVersion(ctx sdk.Context, assetID string) int64 { store := ctx.KVStore(k.storeKey) @@ -410,15 +301,6 @@ func getStakerID(stakerAddr string, chainID uint64) string { return strings.Join([]string{strings.ToLower(stakerAddr), hexutil.EncodeUint64(chainID)}, utils.DelimiterForID) } -// IsLimitChangesNST returns that is input assetID corresponding to asset which balance change has a cap limit -func IsLimitedChangeNST(assetID string) bool { - return limitedChangeNST[NSTAssetID(assetID)] -} - -func maxEffectiveBalance(assetID string) int { - return maxEffectiveBalances[NSTAssetID(assetID)] -} - func getNSTVersionFromDetID(detID string) (int64, error) { parsedDetID := strings.Split(detID, "_") if len(parsedDetID) != 2 { @@ -432,6 +314,60 @@ func getNSTVersionFromDetID(detID string) (int64, error) { } // UpdateNSTBalanceChange serves the post handling for nst balance change -func UpdateNSTBalanceChange(rawData []byte, ctx sdk.Context, k common.KeeperOracle) error { +func UpdateNSTBalanceChange(ctx sdk.Context, rawData []byte, feederID, roundID uint64, kInf common.KeeperOracle) error { + balanceChanges := &types.RawDataNST{} + 
kInf.MustUnmarshal(rawData, balanceChanges) + + k, ok := kInf.(Keeper) + if !ok { + return errors.New("input keeper interface type error") + } + assetID := k.GetParamsFromCache().GetAssetIDForNSTFromFeederID(feederID) + // TODO(leonz): use uint64 for version state + if balanceChanges.Version != uint64(k.GetNSTVersion(ctx, assetID)) { + return errors.New("version not match") + } + _, chainID, _ := assetstypes.ParseID(assetID) + sl := k.GetStakerList(ctx, assetID) + if len(sl.StakerAddrs) == 0 { + return errors.New("staker list is empty") + } + + store := ctx.KVStore(k.storeKey) + + for _, changeKV := range balanceChanges.NstBalanceChanges { + stakerAddr := sl.StakerAddrs[changeKV.StakerIndex] + key := types.NativeTokenStakerKey(assetID, stakerAddr) + value := store.Get(key) + if value == nil { + return errors.New("stakerInfo does not exist") + } + stakerInfo := &types.StakerInfo{} + k.cdc.MustUnmarshal(value, stakerInfo) + newBalance := types.BalanceInfo{} + if length := len(stakerInfo.BalanceList); length > 0 { + newBalance = *(stakerInfo.BalanceList[length-1]) + } + newBalance.Block = uint64(ctx.BlockHeight()) + // we set index as a global reference used through all rounds + newBalance.Index++ + newBalance.Change = types.Action_ACTION_SLASH_REFUND + newBalance.RoundID = roundID + balance := changeKV.Balance + + if delta := int64(balance) - newBalance.Balance; delta != 0 { + decimal, _, err := k.getDecimal(ctx, assetID) + if err != nil { + return err + } + if err := k.delegationKeeper.UpdateNSTBalance(ctx, getStakerID(stakerAddr, chainID), assetID, sdkmath.NewIntWithDecimal(delta, decimal)); err != nil { + return err + } + newBalance.Balance = int64(balance) + } + stakerInfo.Append(&newBalance) + bz := k.cdc.MustMarshal(stakerInfo) + store.Set(key, bz) + } return nil } diff --git a/x/oracle/keeper/post_aggregation.go b/x/oracle/keeper/post_aggregation.go index 2fd13edf2..caeaf07ee 100644 --- a/x/oracle/keeper/post_aggregation.go +++ 
b/x/oracle/keeper/post_aggregation.go @@ -1,21 +1,28 @@ package keeper -import "github.com/imua-xyz/imuachain/x/oracle/keeper/common" +import ( + "github.com/cosmos/cosmos-sdk/codec" + "github.com/imua-xyz/imuachain/x/oracle/keeper/common" +) // RegisterPostAggregation registers handler for tokenfeeder set with deterministic source which need to do some process with the deterministic aggregated result // this is used to register the post handlers served for some customer defined deterministic source oracle requirement -func (k *Keeper) RegisterPostAggregation() { +func (k Keeper) RegisterPostAggregation() { // k.BondPostAggregation(1, UpdateNSTBalanceChange) } -func (k *Keeper) BondPostAggregation(feederID int64, postHandler common.PostAggregationHandler) { +func (k Keeper) BondPostAggregation(feederID int64, postHandler common.PostAggregationHandler) { k.postHandlers[feederID] = postHandler } -func (k *Keeper) GetPostAggregation(feederID int64) (handler common.PostAggregationHandler, found bool) { +func (k Keeper) GetPostAggregation(feederID int64) (handler common.PostAggregationHandler, found bool) { if k.postHandlers == nil { return nil, false } handler, found = k.postHandlers[feederID] return } + +func (k Keeper) MustUnmarshal(bz []byte, ptr codec.ProtoMarshaler) { + k.cdc.MustUnmarshal(bz, ptr) +} diff --git a/x/oracle/keeper/prices.go b/x/oracle/keeper/prices.go index 0fdd1ccaf..3f786ba01 100644 --- a/x/oracle/keeper/prices.go +++ b/x/oracle/keeper/prices.go @@ -211,29 +211,29 @@ func (k Keeper) AppendPriceTR(ctx sdk.Context, tokenID uint64, priceTR types.Pri } k.IncreaseNextRoundID(ctx, tokenID) // skip post processing for nil deterministic ID - if detID == types.NilDetID { - return true - } - - // skip post processing for empty price - if len(priceTR.Price) == 0 { - return true - } - - if nstAssetID := p.GetAssetIDForNSTFromTokenID(tokenID); len(nstAssetID) > 0 { - nstVersion, err := getNSTVersionFromDetID(detID) - if err != nil || nstVersion == 0 { - 
logger.Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err, "nstVersion", nstVersion, "tokenID", tokenID, "roundID", nextRoundID) - return true - } - err = k.UpdateNSTByBalanceChange(ctx, nstAssetID, priceTR, nstVersion) - if err != nil { - // we just report this error in log to notify validators - logger.Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err) - } else { - logger.Info("updated balance change for NST") - } - } + // if detID == types.NilDetID { + // return true + // } + // + // // skip post processing for empty price + // if len(priceTR.Price) == 0 { + // return true + // } + // + // if nstAssetID := p.GetAssetIDForNSTFromTokenID(tokenID); len(nstAssetID) > 0 { + // nstVersion, err := getNSTVersionFromDetID(detID) + // if err != nil || nstVersion == 0 { + // logger.Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err, "nstVersion", nstVersion, "tokenID", tokenID, "roundID", nextRoundID) + // return true + // } + // err = k.UpdateNSTByBalanceChange(ctx, nstAssetID, priceTR, nstVersion) + // if err != nil { + // // we just report this error in log to notify validators + // logger.Error(types.ErrUpdateNativeTokenVirtualPriceFail.Error(), "error", err) + // } else { + // logger.Info("updated balance change for NST") + // } + // } return true } diff --git a/x/oracle/keeper/two_phases.go b/x/oracle/keeper/two_phases.go index dbfc86255..132724391 100644 --- a/x/oracle/keeper/two_phases.go +++ b/x/oracle/keeper/two_phases.go @@ -2,26 +2,76 @@ package keeper import ( "fmt" + "sort" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/ethereum/go-ethereum/common" "github.com/imua-xyz/imuachain/x/oracle/types" ) // SetNextPieceIndex sets the next-piece-index of feederID for 'node-recovery' -func (k Keeper) SetNextPieceIndex(ctx sdk.Context, feederID uint64, pieceIndex uint32) { +// func (k Keeper) SetNextPieceIndexForFeeder(ctx sdk.Context, feederID uint64, pieceIndex uint32) { +// store := 
ctx.KVStore(k.storeKey) +// key := types.TwoPhasesFeederKey(feederID) +// store.Set(key, types.Uint32Bytes(pieceIndex)) +// } + +func (k Keeper) Setup2ndPhase(ctx sdk.Context, feederID uint64, validators []string, leafCount uint32, rootHash []byte) { + k.Setup2ndPhaseNextPieceIndex(ctx, feederID, validators) + k.SetFeederTreeInfo(ctx, feederID, leafCount, rootHash) +} + +// we group the validators by feederID instead of the opposite way because when we set up or clear, +// we do this for all validators under the feederID. When changes happen to a single validator, we enter "forceSeal," +// which removes all validators under that feederID. +// Therefore, we use feederID→[]{validators,index}, not validator→[]{feederID, index} or feederID/validator→index. +// While the latter approach would make "checkAndIncrease" faster when querying the index for a specific +// validator under a specific feederID, it trades off many I/O operations with memory iteration, which isn't optimal. +func (k Keeper) Setup2ndPhaseNextPieceIndex(ctx sdk.Context, feederID uint64, validators []string) { store := ctx.KVStore(k.storeKey) - key := types.TwoPhasesFeederKey(feederID) - store.Set(key, types.Uint32Bytes(pieceIndex)) + key := types.TwoPhasesFeederValidatorsKey(feederID) + validatorIndexList := make([]*types.ValidatorIndex, 0, len(validators)) + // set next piece index for all input validators to 0 under the input feederID + if len(validators) > 0 { + for _, validator := range validators { + validatorIndexList = append(validatorIndexList, &types.ValidatorIndex{Validator: validator, NextIndex: 0}) + } + bz := k.cdc.MustMarshal(&types.FeederValidatorsIndex{ + ValidatorIndexList: validatorIndexList, + }) + store.Set(key, bz) + } + // set next piece index for feederID to 0 + key = types.TwoPhasesFeederKey(feederID) + store.Set(key, types.Uint32Bytes(0)) } -func (k Keeper) ClearNextPieceIndex(ctx sdk.Context, feederID uint64) { +func (k Keeper) Clear2ndPhaseNextPieceIndex(ctx sdk.Context, 
feederID uint64) { store := ctx.KVStore(k.storeKey) key := types.TwoPhasesFeederKey(feederID) + // delete nextPieceIndex for feederID + store.Delete(key) + key = types.TwoPhasesFeederValidatorsKey(feederID) + // delete nextPieceIndex for all validators with feederID store.Delete(key) } +func (k Keeper) SetNextPieceIndexForFeeder(ctx sdk.Context, feederID uint64, nextPieceIndex uint32) { + store := ctx.KVStore(k.storeKey) + key := types.TwoPhasesFeederKey(feederID) + store.Set(key, types.Uint32Bytes(nextPieceIndex)) +} + +// func (k Keeper) ClearNextPieceIndexForFeeder(ctx sdk.Context, feederID uint64) { +// store := ctx.KVStore(k.storeKey) +// key := types.TwoPhasesFeederKey(feederID) +// store.Delete(key) +// } + +// TODO: remove this // NextPieceIndexByFeederID read directly from memory and return the next-piece-index of input feederID func (k Keeper) NextPieceIndexByFeederID(ctx sdk.Context, feederID uint64) (uint32, bool) { + // query from mem-cache return k.FeederManager.NextPieceIndexByFeederID(feederID) } @@ -37,57 +87,155 @@ func (k Keeper) CheckAndIncreaseNextPieceIndex(ctx sdk.Context, validator string return 0, fmt.Errorf("piece_index_check_failed: feederID:%d, max_piece_index:%d, got:%d", feederID, maxPieceIndex, nextPieceIndex) } store := ctx.KVStore(k.storeKey) - key := types.TwoPhasesValidatorPieceKey(validator, feederID) + // key := types.TwoPhasesValidatorPieceKey(validator, feederID) + key := types.TwoPhasesFeederValidatorsKey(feederID) bz := store.Get(key) if bz == nil { return 0, fmt.Errorf("piece_index_check_failed: validator_not_found: validator:%s, feeder_id:%d", validator, feederID) } - expectedPieceIndex := types.BytesToUint32(bz) - if nextPieceIndex != expectedPieceIndex { - return 0, fmt.Errorf("piece_index_check_failed: non_conseecutive: expected:%d, recived:%d", expectedPieceIndex, nextPieceIndex) + feederValidatorsIndex := &types.FeederValidatorsIndex{} + k.cdc.MustUnmarshal(bz, feederValidatorsIndex) + for _, validatorIndex := range 
feederValidatorsIndex.ValidatorIndexList { + if validatorIndex.Validator == validator { + if validatorIndex.NextIndex == nextPieceIndex { + validatorIndex.NextIndex++ + bz = k.cdc.MustMarshal(feederValidatorsIndex) + store.Set(key, bz) + return nextPieceIndex + 1, nil + } + return 0, fmt.Errorf("piece_index_check_failed: non_conseecutive: expected:%d, recived:%d", validatorIndex.NextIndex, nextPieceIndex) + } } - store.Set(key, types.Uint32Bytes(nextPieceIndex+1)) - return nextPieceIndex + 1, nil + return 0, fmt.Errorf("piece_index_check_failed: next_piece_index not found for valdiator:%s", validator) } -func (k Keeper) Setup2ndPhaseNextPieceIndex(ctx sdk.Context, feederID uint64, validators []string) { +// used for recovery, otherwise mem-cache are used directly for reading +func (k Keeper) SetRawDataPiece(ctx sdk.Context, feederID uint64, pieceIndex uint32, rawData []byte) { store := ctx.KVStore(k.storeKey) - // 1. set nextPieceIndex for feederID, first piece index is 0 - store.Set(types.TwoPhasesFeederKey(feederID), types.Uint32Bytes(0)) + key := types.TwoPhasesFeederRawDataKey(feederID, pieceIndex) + store.Set(key, rawData) +} - // 2. 
set nextPieceIndex for all activeValidators, fisr piece index is 0 - for _, validator := range validators { - store.Set(types.TwoPhasesValidatorPieceKey(validator, feederID), types.Uint32Bytes(0)) +func (k Keeper) GetRawDataPieces(ctx sdk.Context, feederID uint64) ([][]byte, error) { + store := ctx.KVStore(k.storeKey) + key := types.TwoPhasesFeederKey(feederID) + bz := store.Get(key) + if bz == nil { + return nil, nil } + nextPieceIndex := types.BytesToUint32(bz) + if nextPieceIndex == 0 { + return nil, nil + } + ret := make([][]byte, 0, nextPieceIndex) + for i := uint32(0); i < nextPieceIndex; i++ { + key = types.TwoPhasesFeederRawDataKey(feederID, i) + bz = store.Get(key) + if bz == nil { + // this should not happen, we got something wrong in db + return nil, fmt.Errorf("there's something wrong in db, miss piece:%d of rawData for feederID:%d", i, feederID) + } + ret = append(ret, bz) + } + return ret, nil } -func (k Keeper) Clear2ndPhaseNextPieceIndex(ctx sdk.Context, feederID uint64, validators []string) { - // TODO(leonz): implement me - // 1. remove feederID->nextPieceIndex, 2. remove validator/feederID->nextPieceIndex +func (k Keeper) SetFeederTreeInfo(ctx sdk.Context, feederID uint64, count uint32, rootHash []byte) { + if count == 0 || len(rootHash) != common.HashLength { + return + } store := ctx.KVStore(k.storeKey) - store.Delete(types.TwoPhasesFeederKey(feederID)) - // 2. 
remove nextPieceIndex for validators - for _, validator := range validators { - store.Delete(types.TwoPhasesValidatorPieceKey(validator, feederID)) + key := types.TwoPhaseFeederTreeInfoKey(feederID) + treeInfo := &types.TreeInfo{ + LeafCount: count, + RootHash: rootHash, } - + bz := k.cdc.MustMarshal(treeInfo) + store.Set(key, bz) } -// set feederID/pieceIndex -> rawData(]byte) - -// read feederID/pieceIndex -> rawData([]byte) - -// read all pieces of rawData from feederID/ - -// clear all rawData for feederID/ +func (k Keeper) GetFeederTreeInfo(ctx sdk.Context, feederID uint64) (uint32, []byte) { + store := ctx.KVStore(k.storeKey) + key := types.TwoPhaseFeederTreeInfoKey(feederID) + bz := store.Get(key) + if bz == nil { + return 0, nil + } + treeInfo := &types.TreeInfo{} + k.cdc.MustUnmarshal(bz, treeInfo) + return treeInfo.LeafCount, treeInfo.RootHash +} -// update feederID -> proof([][]byte) +// used for recovery, otherwise, mem-cache are used directly for reading +func (k Keeper) AddNodesToMerkleTree(ctx sdk.Context, feederID uint64, proof []*types.HashNode) { + if len(proof) == 0 { + return + } + store := ctx.KVStore(k.storeKey) + key := types.TwoPhasesFeederProofKey(feederID) + bz := store.Get(key) + merkle := &types.FlattenTree{} + k.cdc.MustUnmarshal(bz, merkle) + nodes := merkle.Nodes + sort.Slice(proof, func(i, j int) bool { return proof[i].Index < proof[j].Index }) + uniqueOrderedProof := make([]*types.HashNode, 0, len(proof)) + for i := 0; i < len(proof); i++ { + if i == 0 || proof[i].Index != proof[i-1].Index { + uniqueOrderedProof = append(uniqueOrderedProof, proof[i]) + } + } + l1 := len(nodes) + l2 := len(uniqueOrderedProof) + i := 0 + j := 0 + newList := make([]*types.HashNode, 0, l1+l2) + for i < l1 && j < l2 { + switch { + case nodes[i].Index == uniqueOrderedProof[j].Index: + newList = append(newList, nodes[i]) + i++ + j++ + case nodes[i].Index < uniqueOrderedProof[j].Index: + newList = append(newList, nodes[i]) + i++ + case nodes[i].Index > 
uniqueOrderedProof[j].Index: + newList = append(newList, proof[j]) + j++ + } + } + if i < l1 { + newList = append(newList, nodes[i:]...) + } else { + newList = append(newList, uniqueOrderedProof[j:]...) + } + merkle.Nodes = newList + bz = k.cdc.MustMarshal(merkle) + store.Set(key, bz) +} -// clear the whole proof under feederID +func (k Keeper) GetNodesFromMerkleTree(ctx sdk.Context, feederID uint64) []*types.HashNode { + store := ctx.KVStore(k.storeKey) + key := types.TwoPhasesFeederProofKey(feederID) + bz := store.Get(key) + if bz == nil { + return nil + } + mt := &types.FlattenTree{} + k.cdc.MustUnmarshal(bz, mt) + return mt.Nodes +} // clear feederID/, clear: // 1. rawData // 2. proof -func (k Keeper) Clear2ndPhases(ctx sdk.Context, feederID uint64) { - +func (k Keeper) Clear2ndPhase(ctx sdk.Context, feederID uint64, rootIndex uint32) { + store := ctx.KVStore(k.storeKey) + // clear rawData + for i := uint32(0); i <= rootIndex; i++ { + store.Delete(types.TwoPhasesFeederRawDataKey(feederID, i)) + } + // clear proof + store.Delete(types.TwoPhasesFeederProofKey(feederID)) + // clear indexex + k.Clear2ndPhaseNextPieceIndex(ctx, feederID) } diff --git a/x/oracle/types/key_two_phase.go b/x/oracle/types/key_two_phase.go index 1650f4ee9..05a7dadb2 100644 --- a/x/oracle/types/key_two_phase.go +++ b/x/oracle/types/key_two_phase.go @@ -1,9 +1,12 @@ package types const ( - TwoPhasesPrefix = "TwoPhases/" - FeederPrefix = TwoPhasesPrefix + "feeder/" - ValidatorPrefix = TwoPhasesPrefix + "validator/" + TwoPhasesPrefix = "TwoPhases/" + FeederPrefix = TwoPhasesPrefix + "feeder/" + FeederValidatorsPrefix = TwoPhasesPrefix + "validators/" + FeederRawDataPrefix = TwoPhasesPrefix + "rawData/" + FeederProofPrefix = TwoPhasesPrefix + "proof/" + FeederTreeInfoPrefix = TwoPhasesPrefix + "treeInfo/" ) func TwoPhasesKeyPrefix() []byte { @@ -14,8 +17,8 @@ func TwoPhasesFeederKeyPrefix() []byte { return []byte(FeederPrefix) } -func TwoPhasesValidatorKeyPrefix() []byte { - return 
[]byte(ValidatorPrefix) +func TwoPhasesFeederValidatorsKeyPrefix() []byte { + return []byte(FeederValidatorsPrefix) } func TwoPhasesFeederKey(feederID uint64) []byte { @@ -25,11 +28,46 @@ func TwoPhasesFeederKey(feederID uint64) []byte { return key } -func TwoPhasesValidatorPieceKey(validator string, feederID uint64) []byte { +func TwoPhasesFeederValidatorsKey(feederID uint64) []byte { var key []byte - key = append(key, ValidatorPrefix...) - key = append(key, []byte(validator)...) + key = append(key, FeederValidatorsPrefix...) + key = append(key, Uint64Bytes(feederID)...) + return key +} + +func TwoPhasesFeederRawDataKeyPrefix(feederID uint64) []byte { + var key []byte + key = append(key, FeederRawDataPrefix...) + key = append(key, DelimiterForCombinedKey) + key = append(key, Uint64Bytes(feederID)...) key = append(key, DelimiterForCombinedKey) + return key +} + +func TwoPhasesFeederRawDataKey(feederID uint64, index uint32) []byte { + var key []byte + key = append(key, TwoPhasesFeederRawDataKeyPrefix(feederID)...) + return append(key, Uint32Bytes(index)...) +} + +func TwoPhasesFeederProofKeyPrefix() []byte { + return []byte(FeederProofPrefix) +} + +func TwoPhasesFeederProofKey(feederID uint64) []byte { + var key []byte + key = append(key, FeederProofPrefix...) + key = append(key, Uint64Bytes(feederID)...) + return key +} + +func TwoPhaseFeederTreeInfoKeyPrefix() []byte { + return []byte(FeederTreeInfoPrefix) +} + +func TwoPhaseFeederTreeInfoKey(feederID uint64) []byte { + var key []byte + key = append(key, FeederTreeInfoPrefix...) key = append(key, Uint64Bytes(feederID)...) 
return key } diff --git a/x/oracle/types/merkletree.go b/x/oracle/types/merkletree.go index 1325533a7..8bc448fba 100644 --- a/x/oracle/types/merkletree.go +++ b/x/oracle/types/merkletree.go @@ -41,13 +41,20 @@ func (p *Proof) getHashByIndex(index uint32) []byte { return nil } -// hashNode represents a node including index and hash -type HashNode struct { - Index uint32 - Hash []byte +func (m *MerkleTree) SetRawDataPieces(pieces [][]byte) { + m.pieces = pieces +} + +func (m *MerkleTree) SetProofNodes(nodes []*HashNode) { + for _, node := range nodes { + n := m.t[node.Index] + if n == nil { + continue + } + n.hash = node.Hash + } } -// func (m *MerkleTree) getPathFromLeafIndex(index uint32) []uint32 { func (m *MerkleTree) ProofPathFromLeafIndex(index uint32) []uint32 { if index >= m.leafCount { return nil @@ -211,7 +218,7 @@ func (m *MerkleTree) PieceByIndex(targetIndex uint32) ([]byte, bool) { // return rawData as a whole, with true/false to tell if we got the completed rawData // when fasel, the returned first value should be nil func (m *MerkleTree) CompleteRawData() ([]byte, bool) { - if len(m.pieces) < int(m.leafCount) { + if m == nil || len(m.pieces) < int(m.leafCount) || m.leafCount == 0 { return nil, false } if len(m.rawData) > 0 { @@ -229,7 +236,11 @@ func (m *MerkleTree) CompleteRawData() ([]byte, bool) { // only when MerkleTree is set to non-zero leafCount with less amount of pieces than that leafCount we got false returned // so when the return value is false, it also indicates that this MerkleTree is collecting pieces func (m *MerkleTree) Completed() bool { - return len(m.pieces) == int(m.leafCount) + return m != nil && len(m.pieces) == int(m.leafCount) +} + +func (m *MerkleTree) CollectingRawData() bool { + return m != nil && len(m.pieces) < int(m.leafCount) } // (0, true) means the first leaf node is cached @@ -268,15 +279,15 @@ func (m *MerkleTree) RootHash() []byte { return m.root } +func (m *MerkleTree) RootIndex() uint32 { + return m.rootIndex +} + 
// NewMT new a merkle tree initialized with the topology from input pieceSize and totalSize -func NewMT(pieceSize, totalSize uint32, root []byte) *MerkleTree { - if totalSize == 0 { +func NewMT(pieceSize, leafCount uint32, root []byte) *MerkleTree { + if leafCount < 1 { return nil } - leafCount := totalSize / pieceSize - if totalSize%pieceSize > 0 { - leafCount++ - } originalLeafCount := leafCount ret := &MerkleTree{ diff --git a/x/oracle/types/message_create_price.go b/x/oracle/types/message_create_price.go index 0107f2707..31d666770 100644 --- a/x/oracle/types/message_create_price.go +++ b/x/oracle/types/message_create_price.go @@ -49,6 +49,10 @@ func (msg *MsgCreatePrice) ValidateBasic() error { return nil } +func (msg *MsgCreatePrice) IsNotTwoPhases() bool { + return msg.Phase == AggregationPhaseUnspecified +} + func (msg *MsgCreatePrice) IsPhaseOne() bool { return msg.Phase == AggregationPhaseOne } diff --git a/x/oracle/types/params.go b/x/oracle/types/params.go index cdce66c30..fade7b2ad 100644 --- a/x/oracle/types/params.go +++ b/x/oracle/types/params.go @@ -507,8 +507,14 @@ func (p Params) GetTokenIDFromAssetID(assetID string) int { return 0 } -func (p Params) GetAssetIDForNSTFromTokenID(tokenID uint64) string { - assetIDs := p.GetAssetIDsFromTokenID(tokenID) +func (p Params) GetAssetIDForNSTFromFeederID(feederID uint64) string { + tokenID := p.TokenFeeders[feederID].TokenID + + if tokenID >= uint64(len(p.Tokens)) { + return "" + } + assetIDs := strings.Split(p.Tokens[tokenID].AssetID, ",") + for _, assetID := range assetIDs { if nstChain, ok := strings.CutPrefix(strings.ToLower(assetID), NSTIDPrefix); ok { if NSTChain, ok := NSTChainsInverted[nstChain]; ok { @@ -519,13 +525,6 @@ func (p Params) GetAssetIDForNSTFromTokenID(tokenID uint64) string { return "" } -func (p Params) GetAssetIDsFromTokenID(tokenID uint64) []string { - if tokenID >= uint64(len(p.Tokens)) { - return nil - } - return strings.Split(p.Tokens[tokenID].AssetID, ",") -} - func (p Params) 
IsDeterministicSource(sourceID uint64) bool { return p.Sources[sourceID].Deterministic } diff --git a/x/oracle/types/rawdata_nst.pb.go b/x/oracle/types/rawdata_nst.pb.go index 5e1dbfa38..c852f06c1 100644 --- a/x/oracle/types/rawdata_nst.pb.go +++ b/x/oracle/types/rawdata_nst.pb.go @@ -27,7 +27,7 @@ type NSTKV struct { // staker index for a nst defined on imuachain side StakerIndex uint32 `protobuf:"varint,1,opt,name=staker_index,json=stakerIndex,proto3" json:"staker_index,omitempty"` // balance change since last update - BalanceChange int64 `protobuf:"varint,2,opt,name=balance_change,json=balanceChange,proto3" json:"balance_change,omitempty"` + Balance int64 `protobuf:"varint,2,opt,name=balance,proto3" json:"balance,omitempty"` } func (m *NSTKV) Reset() { *m = NSTKV{} } @@ -70,17 +70,18 @@ func (m *NSTKV) GetStakerIndex() uint32 { return 0 } -func (m *NSTKV) GetBalanceChange() int64 { +func (m *NSTKV) GetBalance() int64 { if m != nil { - return m.BalanceChange + return m.Balance } return 0 } // RawDataNST represents balance changes of all stakers for a NST type RawDataNST struct { + Version uint64 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` // NSTKV use array to describe {staker_indx: balance_change} for all stakers whose balance had changed - NstBalanceChanges []*NSTKV `protobuf:"bytes,1,rep,name=nst_balance_changes,json=nstBalanceChanges,proto3" json:"nst_balance_changes,omitempty"` + NstBalanceChanges []*NSTKV `protobuf:"bytes,2,rep,name=nst_balance_changes,json=nstBalanceChanges,proto3" json:"nst_balance_changes,omitempty"` } func (m *RawDataNST) Reset() { *m = RawDataNST{} } @@ -116,6 +117,13 @@ func (m *RawDataNST) XXX_DiscardUnknown() { var xxx_messageInfo_RawDataNST proto.InternalMessageInfo +func (m *RawDataNST) GetVersion() uint64 { + if m != nil { + return m.Version + } + return 0 +} + func (m *RawDataNST) GetNstBalanceChanges() []*NSTKV { if m != nil { return m.NstBalanceChanges @@ -133,23 +141,24 @@ func init() { } var 
fileDescriptor_77c72bbd82fbb1be = []byte{ - // 254 bytes of a gzipped FileDescriptorProto + // 267 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0xcd, 0xcc, 0x2d, 0x4d, 0x4c, 0xce, 0x48, 0xcc, 0xcc, 0xd3, 0xcf, 0x2f, 0x4a, 0x4c, 0xce, 0x49, 0xd5, 0x2f, 0x33, 0xd4, 0x2f, 0x4a, 0x2c, 0x4f, 0x49, 0x2c, 0x49, 0x8c, 0xcf, 0x2b, 0x2e, 0xd1, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x12, 0x86, 0x2b, 0xd3, 0x83, 0x28, 0xd3, 0x2b, 0x33, 0x54, 0x0a, 0xe4, 0x62, 0xf5, + 0xc9, 0x17, 0x12, 0x86, 0x2b, 0xd3, 0x83, 0x28, 0xd3, 0x2b, 0x33, 0x54, 0x72, 0xe1, 0x62, 0xf5, 0x0b, 0x0e, 0xf1, 0x0e, 0x13, 0x52, 0xe4, 0xe2, 0x29, 0x2e, 0x49, 0xcc, 0x4e, 0x2d, 0x8a, 0xcf, 0xcc, 0x4b, 0x49, 0xad, 0x90, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x86, 0x88, 0x79, 0x82, - 0x84, 0x84, 0x54, 0xb9, 0xf8, 0x92, 0x12, 0x73, 0x12, 0xf3, 0x92, 0x53, 0xe3, 0x93, 0x33, 0x12, - 0xf3, 0xd2, 0x53, 0x25, 0x98, 0x14, 0x18, 0x35, 0x98, 0x83, 0x78, 0xa1, 0xa2, 0xce, 0x60, 0x41, - 0xa5, 0x08, 0x2e, 0xae, 0xa0, 0xc4, 0x72, 0x97, 0xc4, 0x92, 0x44, 0xbf, 0xe0, 0x10, 0x21, 0x2f, - 0x2e, 0xe1, 0xbc, 0xe2, 0x92, 0x78, 0x54, 0x8d, 0xc5, 0x12, 0x8c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, - 0x52, 0x7a, 0x58, 0xdc, 0xa4, 0x07, 0x76, 0x50, 0x90, 0x60, 0x5e, 0x71, 0x89, 0x13, 0xb2, 0xc1, - 0xc5, 0x4e, 0x6e, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, - 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, 0x2c, 0xc7, 0x10, 0xa5, 0x93, 0x9e, - 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x0f, 0x32, 0x52, 0xb7, 0xa2, 0xb2, 0x4a, - 0x1f, 0x11, 0x2c, 0x15, 0xb0, 0x80, 0x29, 0xa9, 0x2c, 0x48, 0x2d, 0x4e, 0x62, 0x03, 0x07, 0x88, - 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x28, 0x64, 0x99, 0x8c, 0x39, 0x01, 0x00, 0x00, + 0x84, 0x84, 0x24, 0xb8, 0xd8, 0x93, 0x12, 0x73, 0x12, 0xf3, 0x92, 0x53, 0x25, 0x98, 0x14, 0x18, + 0x35, 0x98, 0x83, 0x60, 0x5c, 0xa5, 0x22, 0x2e, 0xae, 0xa0, 0xc4, 0x72, 0x97, 0xc4, 0x92, 0x44, + 0xbf, 0xe0, 0x10, 
0x90, 0xba, 0xb2, 0xd4, 0xa2, 0xe2, 0xcc, 0xfc, 0x3c, 0xb0, 0x29, 0x2c, 0x41, + 0x30, 0xae, 0x90, 0x17, 0x97, 0x70, 0x5e, 0x71, 0x49, 0x3c, 0x54, 0x5b, 0x7c, 0x72, 0x46, 0x62, + 0x5e, 0x7a, 0x6a, 0xb1, 0x04, 0x93, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0x94, 0x1e, 0x16, 0x07, 0xea, + 0x81, 0x5d, 0x17, 0x24, 0x98, 0x57, 0x5c, 0xe2, 0x04, 0xd1, 0xe5, 0x0c, 0xd1, 0xe4, 0xe4, 0x76, + 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, + 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0x3a, 0xe9, 0x99, 0x25, 0x19, 0xa5, + 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x20, 0x23, 0x75, 0x2b, 0x2a, 0xab, 0xf4, 0x11, 0x61, 0x54, + 0x01, 0x0b, 0xa5, 0x92, 0xca, 0x82, 0xd4, 0xe2, 0x24, 0x36, 0x70, 0xe8, 0x18, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x19, 0xc4, 0xa6, 0x05, 0x46, 0x01, 0x00, 0x00, } func (m *NSTKV) Marshal() (dAtA []byte, err error) { @@ -172,8 +181,8 @@ func (m *NSTKV) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.BalanceChange != 0 { - i = encodeVarintRawdataNst(dAtA, i, uint64(m.BalanceChange)) + if m.Balance != 0 { + i = encodeVarintRawdataNst(dAtA, i, uint64(m.Balance)) i-- dAtA[i] = 0x10 } @@ -216,9 +225,14 @@ func (m *RawDataNST) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintRawdataNst(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 } } + if m.Version != 0 { + i = encodeVarintRawdataNst(dAtA, i, uint64(m.Version)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } @@ -242,8 +256,8 @@ func (m *NSTKV) Size() (n int) { if m.StakerIndex != 0 { n += 1 + sovRawdataNst(uint64(m.StakerIndex)) } - if m.BalanceChange != 0 { - n += 1 + sovRawdataNst(uint64(m.BalanceChange)) + if m.Balance != 0 { + n += 1 + sovRawdataNst(uint64(m.Balance)) } return n } @@ -254,6 +268,9 @@ func (m *RawDataNST) Size() (n int) { } var l int _ = l + if m.Version != 0 { + n += 1 + sovRawdataNst(uint64(m.Version)) + } if len(m.NstBalanceChanges) > 0 { for _, e := range 
m.NstBalanceChanges { l = e.Size() @@ -319,9 +336,9 @@ func (m *NSTKV) Unmarshal(dAtA []byte) error { } case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BalanceChange", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Balance", wireType) } - m.BalanceChange = 0 + m.Balance = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowRawdataNst @@ -331,7 +348,7 @@ func (m *NSTKV) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.BalanceChange |= int64(b&0x7F) << shift + m.Balance |= int64(b&0x7F) << shift if b < 0x80 { break } @@ -387,6 +404,25 @@ func (m *RawDataNST) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + m.Version = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRawdataNst + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Version |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field NstBalanceChanges", wireType) } diff --git a/x/oracle/types/two_phases.pb.go b/x/oracle/types/two_phases.pb.go new file mode 100644 index 000000000..1d579da07 --- /dev/null +++ b/x/oracle/types/two_phases.pb.go @@ -0,0 +1,1145 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: imuachain/oracle/v1/two_phases.proto + +package types + +import ( + fmt "fmt" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Nonce is a message that contains a nonce for a feeder +type ValidatorIndex struct { + // FeederID is the ID of the feeder that corresponding to the nonce + Validator string `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator,omitempty"` + // value is the nonce value + NextIndex uint32 `protobuf:"varint,2,opt,name=next_index,json=nextIndex,proto3" json:"next_index,omitempty"` +} + +func (m *ValidatorIndex) Reset() { *m = ValidatorIndex{} } +func (m *ValidatorIndex) String() string { return proto.CompactTextString(m) } +func (*ValidatorIndex) ProtoMessage() {} +func (*ValidatorIndex) Descriptor() ([]byte, []int) { + return fileDescriptor_0762117ab9aef571, []int{0} +} +func (m *ValidatorIndex) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorIndex.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorIndex) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorIndex.Merge(m, src) +} +func (m *ValidatorIndex) XXX_Size() int { + return m.Size() +} +func (m *ValidatorIndex) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorIndex.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorIndex proto.InternalMessageInfo + +func (m *ValidatorIndex) GetValidator() string { + if m != nil { + return m.Validator + } + return "" +} + +func (m *ValidatorIndex) GetNextIndex() uint32 { + if m != nil { + return m.NextIndex + } + return 0 +} + +// ValidatorNonce is a message that contains the nonces for a validator +type FeederValidatorsIndex struct { + // nonces is the list of nonces for the feeders + 
ValidatorIndexList []*ValidatorIndex `protobuf:"bytes,2,rep,name=validator_index_list,json=validatorIndexList,proto3" json:"validator_index_list,omitempty"` +} + +func (m *FeederValidatorsIndex) Reset() { *m = FeederValidatorsIndex{} } +func (m *FeederValidatorsIndex) String() string { return proto.CompactTextString(m) } +func (*FeederValidatorsIndex) ProtoMessage() {} +func (*FeederValidatorsIndex) Descriptor() ([]byte, []int) { + return fileDescriptor_0762117ab9aef571, []int{1} +} +func (m *FeederValidatorsIndex) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FeederValidatorsIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FeederValidatorsIndex.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FeederValidatorsIndex) XXX_Merge(src proto.Message) { + xxx_messageInfo_FeederValidatorsIndex.Merge(m, src) +} +func (m *FeederValidatorsIndex) XXX_Size() int { + return m.Size() +} +func (m *FeederValidatorsIndex) XXX_DiscardUnknown() { + xxx_messageInfo_FeederValidatorsIndex.DiscardUnknown(m) +} + +var xxx_messageInfo_FeederValidatorsIndex proto.InternalMessageInfo + +func (m *FeederValidatorsIndex) GetValidatorIndexList() []*ValidatorIndex { + if m != nil { + return m.ValidatorIndexList + } + return nil +} + +type HashNode struct { + Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Hash []byte `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"` +} + +func (m *HashNode) Reset() { *m = HashNode{} } +func (m *HashNode) String() string { return proto.CompactTextString(m) } +func (*HashNode) ProtoMessage() {} +func (*HashNode) Descriptor() ([]byte, []int) { + return fileDescriptor_0762117ab9aef571, []int{2} +} +func (m *HashNode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HashNode) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HashNode.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HashNode) XXX_Merge(src proto.Message) { + xxx_messageInfo_HashNode.Merge(m, src) +} +func (m *HashNode) XXX_Size() int { + return m.Size() +} +func (m *HashNode) XXX_DiscardUnknown() { + xxx_messageInfo_HashNode.DiscardUnknown(m) +} + +var xxx_messageInfo_HashNode proto.InternalMessageInfo + +func (m *HashNode) GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *HashNode) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +// Proof represents all hash nodes of a Mekle tree with indexes +type FlattenTree struct { + Nodes []*HashNode `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` +} + +func (m *FlattenTree) Reset() { *m = FlattenTree{} } +func (m *FlattenTree) String() string { return proto.CompactTextString(m) } +func (*FlattenTree) ProtoMessage() {} +func (*FlattenTree) Descriptor() ([]byte, []int) { + return fileDescriptor_0762117ab9aef571, []int{3} +} +func (m *FlattenTree) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlattenTree) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_FlattenTree.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *FlattenTree) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlattenTree.Merge(m, src) +} +func (m *FlattenTree) XXX_Size() int { + return m.Size() +} +func (m *FlattenTree) XXX_DiscardUnknown() { + xxx_messageInfo_FlattenTree.DiscardUnknown(m) +} + +var xxx_messageInfo_FlattenTree proto.InternalMessageInfo + +func (m *FlattenTree) GetNodes() []*HashNode { + if m != nil { + return m.Nodes 
+ } + return nil +} + +type TreeInfo struct { + LeafCount uint32 `protobuf:"varint,1,opt,name=leaf_count,json=leafCount,proto3" json:"leaf_count,omitempty"` + RootHash []byte `protobuf:"bytes,2,opt,name=root_hash,json=rootHash,proto3" json:"root_hash,omitempty"` +} + +func (m *TreeInfo) Reset() { *m = TreeInfo{} } +func (m *TreeInfo) String() string { return proto.CompactTextString(m) } +func (*TreeInfo) ProtoMessage() {} +func (*TreeInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_0762117ab9aef571, []int{4} +} +func (m *TreeInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TreeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_TreeInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *TreeInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_TreeInfo.Merge(m, src) +} +func (m *TreeInfo) XXX_Size() int { + return m.Size() +} +func (m *TreeInfo) XXX_DiscardUnknown() { + xxx_messageInfo_TreeInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_TreeInfo proto.InternalMessageInfo + +func (m *TreeInfo) GetLeafCount() uint32 { + if m != nil { + return m.LeafCount + } + return 0 +} + +func (m *TreeInfo) GetRootHash() []byte { + if m != nil { + return m.RootHash + } + return nil +} + +func init() { + proto.RegisterType((*ValidatorIndex)(nil), "imuachain.oracle.v1.ValidatorIndex") + proto.RegisterType((*FeederValidatorsIndex)(nil), "imuachain.oracle.v1.FeederValidatorsIndex") + proto.RegisterType((*HashNode)(nil), "imuachain.oracle.v1.HashNode") + proto.RegisterType((*FlattenTree)(nil), "imuachain.oracle.v1.FlattenTree") + proto.RegisterType((*TreeInfo)(nil), "imuachain.oracle.v1.TreeInfo") +} + +func init() { + proto.RegisterFile("imuachain/oracle/v1/two_phases.proto", fileDescriptor_0762117ab9aef571) +} + +var fileDescriptor_0762117ab9aef571 = []byte{ + // 352 
bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xc1, 0x4e, 0xf2, 0x40, + 0x14, 0x85, 0x19, 0xfe, 0x1f, 0x43, 0x07, 0x75, 0x31, 0x62, 0xd2, 0x44, 0x69, 0x48, 0x75, 0xc1, + 0x42, 0xdb, 0x20, 0x3e, 0x01, 0x26, 0x8d, 0x24, 0xea, 0xa2, 0x51, 0x17, 0x6e, 0x9a, 0xa1, 0xbd, + 0xd8, 0x49, 0xca, 0x0c, 0xe9, 0x0c, 0xb5, 0xf8, 0x14, 0x3e, 0x96, 0x4b, 0x96, 0x2e, 0x0d, 0xbc, + 0x88, 0x99, 0x56, 0x40, 0x12, 0x76, 0x73, 0x4f, 0xce, 0x3d, 0xf7, 0xcb, 0x1c, 0x7c, 0xce, 0xc6, + 0x53, 0x1a, 0xc6, 0x94, 0x71, 0x57, 0xa4, 0x34, 0x4c, 0xc0, 0xcd, 0xba, 0xae, 0x7a, 0x13, 0xc1, + 0x24, 0xa6, 0x12, 0xa4, 0x33, 0x49, 0x85, 0x12, 0xe4, 0x68, 0xed, 0x72, 0x4a, 0x97, 0x93, 0x75, + 0xed, 0x7b, 0x7c, 0xf8, 0x4c, 0x13, 0x16, 0x51, 0x25, 0xd2, 0x01, 0x8f, 0x20, 0x27, 0xa7, 0xd8, + 0xc8, 0x56, 0x8a, 0x89, 0xda, 0xa8, 0x63, 0xf8, 0x1b, 0x81, 0xb4, 0x30, 0xe6, 0x90, 0xab, 0x80, + 0x69, 0xaf, 0x59, 0x6d, 0xa3, 0xce, 0x81, 0x6f, 0x68, 0xa5, 0x58, 0xb6, 0x39, 0x3e, 0xf6, 0x00, + 0x22, 0x48, 0xd7, 0xa1, 0xb2, 0x4c, 0x7d, 0xc2, 0xcd, 0x75, 0x48, 0xb9, 0x1c, 0x24, 0x4c, 0x2a, + 0xb3, 0xda, 0xfe, 0xd7, 0x69, 0x5c, 0x9d, 0x39, 0x3b, 0xd8, 0x9c, 0x6d, 0x30, 0x9f, 0x64, 0x5b, + 0xf3, 0x1d, 0x93, 0xca, 0xbe, 0xc6, 0xf5, 0x5b, 0x2a, 0xe3, 0x07, 0x11, 0x01, 0x69, 0xe2, 0x5a, + 0x49, 0x85, 0x0a, 0xaa, 0x72, 0x20, 0x04, 0xff, 0x8f, 0xa9, 0x8c, 0x0b, 0xd4, 0x7d, 0xbf, 0x78, + 0xdb, 0x7d, 0xdc, 0xf0, 0x12, 0xaa, 0x14, 0xf0, 0xc7, 0x14, 0x80, 0xf4, 0x70, 0x8d, 0x8b, 0x08, + 0xa4, 0x89, 0x0a, 0x98, 0xd6, 0x4e, 0x98, 0xd5, 0x19, 0xbf, 0xf4, 0xda, 0x1e, 0xae, 0xeb, 0xe5, + 0x01, 0x1f, 0x09, 0xfd, 0x29, 0x09, 0xd0, 0x51, 0x10, 0x8a, 0x29, 0x57, 0xbf, 0xe7, 0x0d, 0xad, + 0xdc, 0x68, 0x81, 0x9c, 0x60, 0x23, 0x15, 0x42, 0x05, 0x7f, 0x38, 0xea, 0x5a, 0xd0, 0x91, 0x7d, + 0xef, 0x73, 0x61, 0xa1, 0xf9, 0xc2, 0x42, 0xdf, 0x0b, 0x0b, 0x7d, 0x2c, 0xad, 0xca, 0x7c, 0x69, + 0x55, 0xbe, 0x96, 0x56, 0xe5, 0xe5, 0xe2, 0x95, 0xa9, 0x78, 0x3a, 0x74, 0x42, 0x31, 0x76, 0x35, 
+ 0xd1, 0x65, 0x3e, 0x7b, 0x77, 0x37, 0x4d, 0xe7, 0xab, 0xae, 0xd5, 0x6c, 0x02, 0x72, 0xb8, 0x57, + 0x94, 0xdc, 0xfb, 0x09, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x9f, 0x5c, 0x2f, 0x0c, 0x02, 0x00, 0x00, +} + +func (m *ValidatorIndex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorIndex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NextIndex != 0 { + i = encodeVarintTwoPhases(dAtA, i, uint64(m.NextIndex)) + i-- + dAtA[i] = 0x10 + } + if len(m.Validator) > 0 { + i -= len(m.Validator) + copy(dAtA[i:], m.Validator) + i = encodeVarintTwoPhases(dAtA, i, uint64(len(m.Validator))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *FeederValidatorsIndex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FeederValidatorsIndex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FeederValidatorsIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ValidatorIndexList) > 0 { + for iNdEx := len(m.ValidatorIndexList) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorIndexList[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTwoPhases(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + return len(dAtA) - i, nil +} + +func (m *HashNode) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + 
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HashNode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HashNode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTwoPhases(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x12 + } + if m.Index != 0 { + i = encodeVarintTwoPhases(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *FlattenTree) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlattenTree) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlattenTree) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Nodes) > 0 { + for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Nodes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTwoPhases(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TreeInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TreeInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TreeInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RootHash) > 0 { + i -= len(m.RootHash) + copy(dAtA[i:], m.RootHash) + i = encodeVarintTwoPhases(dAtA, i, uint64(len(m.RootHash))) + i-- + dAtA[i] = 0x12 
+ } + if m.LeafCount != 0 { + i = encodeVarintTwoPhases(dAtA, i, uint64(m.LeafCount)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintTwoPhases(dAtA []byte, offset int, v uint64) int { + offset -= sovTwoPhases(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ValidatorIndex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Validator) + if l > 0 { + n += 1 + l + sovTwoPhases(uint64(l)) + } + if m.NextIndex != 0 { + n += 1 + sovTwoPhases(uint64(m.NextIndex)) + } + return n +} + +func (m *FeederValidatorsIndex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ValidatorIndexList) > 0 { + for _, e := range m.ValidatorIndexList { + l = e.Size() + n += 1 + l + sovTwoPhases(uint64(l)) + } + } + return n +} + +func (m *HashNode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovTwoPhases(uint64(m.Index)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTwoPhases(uint64(l)) + } + return n +} + +func (m *FlattenTree) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Nodes) > 0 { + for _, e := range m.Nodes { + l = e.Size() + n += 1 + l + sovTwoPhases(uint64(l)) + } + } + return n +} + +func (m *TreeInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LeafCount != 0 { + n += 1 + sovTwoPhases(uint64(m.LeafCount)) + } + l = len(m.RootHash) + if l > 0 { + n += 1 + l + sovTwoPhases(uint64(l)) + } + return n +} + +func sovTwoPhases(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTwoPhases(x uint64) (n int) { + return sovTwoPhases(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ValidatorIndex) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorIndex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorIndex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTwoPhases + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTwoPhases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Validator = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextIndex", wireType) + } + m.NextIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextIndex |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTwoPhases(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTwoPhases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FeederValidatorsIndex) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FeederValidatorsIndex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FeederValidatorsIndex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorIndexList", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTwoPhases + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTwoPhases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorIndexList = append(m.ValidatorIndexList, &ValidatorIndex{}) + if err := m.ValidatorIndexList[len(m.ValidatorIndexList)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTwoPhases(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTwoPhases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HashNode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HashNode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HashNode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTwoPhases + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTwoPhases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTwoPhases(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTwoPhases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlattenTree) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlattenTree: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlattenTree: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTwoPhases + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTwoPhases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nodes = append(m.Nodes, &HashNode{}) + if err := m.Nodes[len(m.Nodes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTwoPhases(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthTwoPhases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TreeInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TreeInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TreeInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeafCount", wireType) + } + m.LeafCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.LeafCount |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RootHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTwoPhases + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTwoPhases + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RootHash = append(m.RootHash[:0], dAtA[iNdEx:postIndex]...) 
+ if m.RootHash == nil { + m.RootHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTwoPhases(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTwoPhases + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTwoPhases(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTwoPhases + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTwoPhases + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTwoPhases + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTwoPhases + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTwoPhases = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTwoPhases = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTwoPhases = 
fmt.Errorf("proto: unexpected end of group") +)