RC v1.6.5 #72

Merged · 17 commits · Mar 8, 2024
Changes from all commits
23 changes: 11 additions & 12 deletions app/app.go
@@ -142,6 +142,7 @@ var (
ibcclientclient.UpgradeProposalHandler,
dfractclient.UpdateParamsProposalHandler,
millionsclient.RegisterPoolProposalHandler,
millionsclient.ClosePoolProposalHandler,
millionsclient.UpdatePoolProposalHandler,
millionsclient.UpdateParamsProposalHandler,
},
@@ -781,18 +782,6 @@ func (app *App) registerUpgradeHandlers() {
return app.mm.RunMigrations(ctx, app.configurator, fromVM)
})

app.UpgradeKeeper.SetUpgradeHandler("v1.4.5", func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
// Kill the first pool that shouldn't be used anymore after that upgrade
_, err := app.MillionsKeeper.UnsafeKillPool(ctx, 1)
if err != nil {
return fromVM, err
}

// Continue normal upgrade processing
app.Logger().Info("Pool killed. v1.4.5 upgrade applied")
return app.mm.RunMigrations(ctx, app.configurator, fromVM)
})

app.UpgradeKeeper.SetUpgradeHandler("v1.5.0", func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
app.Logger().Info("Starting v1.5.0 upgrade")

@@ -919,6 +908,11 @@ func (app *App) registerUpgradeHandlers() {
return app.mm.RunMigrations(ctx, app.configurator, fromVM)
})

app.UpgradeKeeper.SetUpgradeHandler("v1.6.5", func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) {
app.Logger().Info("Starting v1.6.5 upgrade")
return app.mm.RunMigrations(ctx, app.configurator, fromVM)
})

upgradeInfo, err := app.UpgradeKeeper.ReadUpgradeInfoFromDisk()
if err != nil {
panic(fmt.Sprintf("failed to read upgrade info from disk %s", err))
@@ -1020,4 +1014,9 @@ func (app *App) registerUpgradeHandlers() {
storeUpgrades := storetypes.StoreUpgrades{}
app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades))
}

if upgradeInfo.Name == "v1.6.5" && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) {
storeUpgrades := storetypes.StoreUpgrades{}
app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades))
}
}
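
The v1.6.5 entries above are pure boilerplate: the handler only runs `RunMigrations`, and the store loader registers an empty `StoreUpgrades` because no module store is added or removed. For contrast, a hedged sketch of what the store loader would look like for an upgrade that did add a module store — the `v1.7.0` name and `newmodule` key are hypothetical:

```go
// Hypothetical contrast case, not part of this PR: an upgrade that introduces
// a new module must list its store key so the loader mounts it at the
// upgrade height. v1.6.5 adds none, hence the empty struct above.
if upgradeInfo.Name == "v1.7.0" && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) {
	storeUpgrades := storetypes.StoreUpgrades{
		Added: []string{"newmodule"}, // hypothetical new store key
	}
	app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades))
}
```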
9 changes: 9 additions & 0 deletions proto/lum/network/millions/gov.proto
@@ -64,6 +64,15 @@ message ProposalUpdatePool {
repeated FeeTaker fee_takers = 11 [ (gogoproto.nullable) = false ];
}

message ProposalClosePool {
option (gogoproto.goproto_stringer) = false;

string title = 1;
string description = 2;

uint64 pool_id = 3;
}

message ProposalUpdateParams {
option (gogoproto.goproto_stringer) = false;

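The new `ProposalClosePool` message follows the legacy-gov `Content` pattern used by the other millions proposals. Its `ValidateBasic` is not part of this diff; a minimal sketch, assuming the standard `x/gov` v1beta1 helpers, might look like:

```go
// Minimal sketch only — the actual implementation in x/millions/types
// is not shown in this diff and may differ.
func (p *ProposalClosePool) ValidateBasic() error {
	// Standard title/description checks from x/gov
	if err := govtypes.ValidateAbstract(p); err != nil {
		return err
	}
	// Pool IDs start at 1, so 0 denotes an unset field
	if p.PoolId == 0 {
		return fmt.Errorf("invalid pool id: %d", p.PoolId)
	}
	return nil
}
```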
5 changes: 3 additions & 2 deletions proto/lum/network/millions/pool.proto
@@ -30,7 +30,8 @@ enum PoolState {
POOL_STATE_CREATED = 1 [ (gogoproto.enumvalue_customname) = "Created" ];
POOL_STATE_READY = 2 [ (gogoproto.enumvalue_customname) = "Ready" ];
POOL_STATE_PAUSED = 3 [ (gogoproto.enumvalue_customname) = "Paused" ];
POOL_STATE_KILLED = 4 [ (gogoproto.enumvalue_customname) = "Killed" ];
POOL_STATE_CLOSING = 4 [ (gogoproto.enumvalue_customname) = "Closing" ];
POOL_STATE_CLOSED = 5 [ (gogoproto.enumvalue_customname) = "Closed" ];
}

// PoolType the type of Pool
@@ -119,8 +120,8 @@ message Pool {
cosmos.base.v1beta1.Coin available_prize_pool = 29
[ (gogoproto.nullable) = false ];
repeated FeeTaker fee_takers = 30 [ (gogoproto.nullable) = false ];
reserved 31;

reserved 31;
PoolState state = 32;

int64 created_at_height = 33;
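Note that the enum reuses value 4: pools stored on-chain as `Killed` will decode as `Closing` after the upgrade, and `Closed` is appended as value 5. An illustrative helper (not from this PR) showing how callers might gate operations on the two-phase shutdown:

```go
// Illustrative only: Closing means in-flight operations are still draining,
// while Closed is terminal; neither accepts new deposits.
func poolAcceptsDeposits(state millionstypes.PoolState) bool {
	switch state {
	case millionstypes.PoolState_Ready:
		return true
	case millionstypes.PoolState_Created, millionstypes.PoolState_Paused,
		millionstypes.PoolState_Closing, millionstypes.PoolState_Closed:
		return false
	default:
		return false
	}
}
```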
4 changes: 4 additions & 0 deletions readme.md
@@ -39,6 +39,10 @@ Please also make sure to have a look to the [contributing guidelines](https://gi

Information related to the Lum Network mainnet `lum-network-1` can be found in the [mainnet repository](https://github.com/lum-network/mainnet).

### v1.6.5 - `TODO` - Block `TODO`

`TODO`

### v1.6.4 - 2024-02-01 - Block 11390000
CosmosMillions: Make ICA channel restoration unlock all entities and revamp the fee system to allow for more than one fee taker.

84 changes: 84 additions & 0 deletions x/millions/client/cli/proposal.go
@@ -17,6 +17,18 @@ import (
"github.com/lum-network/chain/x/millions/types"
)

func parseClosePoolProposalFile(cdc codec.JSONCodec, proposalFile string) (proposal types.ProposalClosePool, err error) {
contents, err := os.ReadFile(proposalFile)
if err != nil {
return proposal, err
}

if err = cdc.UnmarshalJSON(contents, &proposal); err != nil {
return proposal, err
}
return proposal, nil
}

func parseRegisterPoolProposalFile(cdc codec.JSONCodec, proposalFile string) (proposal types.ProposalRegisterPool, err error) {
contents, err := os.ReadFile(proposalFile)
if err != nil {
@@ -235,6 +247,78 @@ Where proposal.json contains:
return cmd
}

func CmdProposalClosePool() *cobra.Command {
cmd := &cobra.Command{
Use: "millions-close-pool [proposal-file]",
Short: "Submit a millions close pool proposal",
Long: strings.TrimSpace(
fmt.Sprintf(`Submit a ClosePool proposal along with an initial deposit.
The proposal details must be supplied via a JSON file.

Example:
$ %s tx gov submit-legacy-proposal millions-close-pool <path/to/proposal.json> --from=<key_or_address>

Where proposal.json contains:
{
"title": "Close my pool",
"description": "This is my close pool",
"pool_id": 1
}
`, version.AppName),
),
Args: cobra.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
// Acquire the client context
clientCtx, err := client.GetClientTxContext(cmd)
if err != nil {
return err
}

// Parse the proposal file
proposal, err := parseClosePoolProposalFile(clientCtx.Codec, args[0])
if err != nil {
return err
}

if err := proposal.ValidateBasic(); err != nil {
return err
}

// Grab the parameters
from := clientCtx.GetFromAddress()

// Grab the deposit
depositStr, err := cmd.Flags().GetString(govcli.FlagDeposit)
if err != nil {
return err
}

deposit, err := sdk.ParseCoinsNormalized(depositStr)
if err != nil {
return err
}

msg, err := govtypes.NewMsgSubmitProposal(&proposal, deposit, from)
if err != nil {
return err
}

if err := msg.ValidateBasic(); err != nil {
return err
}

// Generate the transaction
return tx.GenerateOrBroadcastTxCLI(clientCtx, cmd.Flags(), msg)
},
}

cmd.Flags().String(govcli.FlagDeposit, "1ulum", "deposit of proposal")
if err := cmd.MarkFlagRequired(govcli.FlagDeposit); err != nil {
panic(err)
}
return cmd
}

func CmdProposalUpdateParams() *cobra.Command {
cmd := &cobra.Command{
Use: "millions-update-params [proposal-file]",
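A hedged sketch of a white-box test for the new parse helper (not part of this PR); note that proto3 JSON renders `uint64` fields as strings, so `pool_id` is quoted:

```go
// Hypothetical test placed in the same package as parseClosePoolProposalFile.
import (
	"os"
	"path/filepath"
	"testing"

	"github.com/cosmos/cosmos-sdk/codec"
	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
)

func TestParseClosePoolProposalFile(t *testing.T) {
	cdc := codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
	path := filepath.Join(t.TempDir(), "proposal.json")
	contents := []byte(`{"title": "Close my pool", "description": "This is my close pool proposal", "pool_id": "1"}`)
	if err := os.WriteFile(path, contents, 0o600); err != nil {
		t.Fatal(err)
	}
	proposal, err := parseClosePoolProposalFile(cdc, path)
	if err != nil {
		t.Fatal(err)
	}
	if proposal.PoolId != 1 {
		t.Fatalf("expected pool_id 1, got %d", proposal.PoolId)
	}
}
```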
1 change: 1 addition & 0 deletions x/millions/client/proposal_handler.go
@@ -9,5 +9,6 @@ import (
var (
RegisterPoolProposalHandler = govclient.NewProposalHandler(cli.CmdProposalRegisterPool)
UpdatePoolProposalHandler = govclient.NewProposalHandler(cli.CmdProposalUpdatePool)
ClosePoolProposalHandler = govclient.NewProposalHandler(cli.CmdProposalClosePool)
UpdateParamsProposalHandler = govclient.NewProposalHandler(cli.CmdProposalUpdateParams)
)
4 changes: 2 additions & 2 deletions x/millions/genesis_test.go
@@ -71,7 +71,7 @@ var testGenesis = millionstypes.GenesisState{
{PoolId: 3, PoolType: millionstypes.PoolType_Staking, TvlAmount: sdk.NewInt(601), DepositorsCount: 1, SponsorshipAmount: sdk.ZeroInt(), Denom: "denom-3", NativeDenom: "denom-3", NextDrawId: 1,
ChainId: "c1", Validators: defaultValidators, MinDepositAmount: sdk.NewInt(1_000_000), UnbondingDuration: time.Duration(millionstypes.DefaultUnbondingDuration), MaxUnbondingEntries: sdk.NewInt(millionstypes.DefaultMaxUnbondingEntries), AvailablePrizePool: sdk.NewCoin("denom-3", sdk.ZeroInt()),
DrawSchedule: defaultSchedule, PrizeStrategy: defaultPrizeStrat,
State: millionstypes.PoolState_Killed, Bech32PrefixAccAddr: "lum", Bech32PrefixValAddr: "lumvaloper",
State: millionstypes.PoolState_Closed, Bech32PrefixAccAddr: "lum", Bech32PrefixValAddr: "lumvaloper",
FeeTakers: []millionstypes.FeeTaker{
{Destination: authtypes.FeeCollectorName, Amount: sdk.NewDecWithPrec(millionstypes.DefaultFeeTakerAmount, 2), Type: millionstypes.FeeTakerType_LocalModuleAccount},
},
@@ -87,7 +87,7 @@
{PoolId: 5, PoolType: millionstypes.PoolType_Staking, TvlAmount: sdk.NewInt(0), DepositorsCount: 0, SponsorshipAmount: sdk.ZeroInt(), Denom: "denom-5", NativeDenom: "denom-5", NextDrawId: 1,
ChainId: "c1", Validators: defaultValidators, MinDepositAmount: sdk.NewInt(1_000_000), UnbondingDuration: time.Duration(millionstypes.DefaultUnbondingDuration), MaxUnbondingEntries: sdk.NewInt(millionstypes.DefaultMaxUnbondingEntries), AvailablePrizePool: sdk.NewCoin("denom-5", sdk.ZeroInt()),
DrawSchedule: defaultSchedule, PrizeStrategy: defaultPrizeStrat,
State: millionstypes.PoolState_Killed, Bech32PrefixAccAddr: "lum", Bech32PrefixValAddr: "lumvaloper",
State: millionstypes.PoolState_Closed, Bech32PrefixAccAddr: "lum", Bech32PrefixValAddr: "lumvaloper",
FeeTakers: []millionstypes.FeeTaker{
{Destination: authtypes.FeeCollectorName, Amount: sdk.NewDecWithPrec(millionstypes.DefaultFeeTakerAmount, 2), Type: millionstypes.FeeTakerType_LocalModuleAccount},
},
4 changes: 4 additions & 0 deletions x/millions/handler_proposal.go
@@ -13,6 +13,10 @@ import (
func NewMillionsProposalHandler(k keeper.Keeper) govtypes.Handler {
return func(ctx sdk.Context, content govtypes.Content) error {
switch c := content.(type) {
case *types.ProposalClosePool:
{
return k.ClosePool(ctx, c.PoolId)
}
case *types.ProposalUpdatePool:
{
return k.UpdatePool(ctx, c.PoolId, c.Validators, c.MinDepositAmount, c.UnbondingDuration, c.MaxUnbondingEntries, c.DrawSchedule, c.PrizeStrategy, c.State, c.FeeTakers)
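`keeper.ClosePool` itself is not included in this diff. The deposit callback change further down (`keeper_deposit.go`) suggests it is re-entrant: it is invoked again as in-flight operations settle. A purely speculative outline under that assumption:

```go
// Purely speculative outline — the real keeper implementation will differ.
func (k Keeper) ClosePool(ctx sdk.Context, poolID uint64) error {
	pool, err := k.GetPool(ctx, poolID) // getter assumed to exist
	if err != nil {
		return err
	}
	// First call moves the pool into Closing; re-entries resume from there
	if pool.State != types.PoolState_Closing {
		pool.State = types.PoolState_Closing
		k.updatePool(ctx, &pool)
	}
	// ... drain remaining deposits/undelegations; once nothing is left
	// in flight, mark the pool Closed ...
	return nil
}
```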
3 changes: 3 additions & 0 deletions x/millions/keeper/callbacks_bank_send.go
@@ -44,6 +44,9 @@ func BankSendCallback(k Keeper, ctx sdk.Context, packet channeltypes.Packet, ack
return err
}

// Scenarios:
// - Timeout: Does nothing, handled when restoring the ICA channel
// - Error: Put entity in error state to allow users to retry
if ackResponse.Status == icacallbackstypes.AckResponseStatus_TIMEOUT {
k.Logger(ctx).Debug("Received timeout for a bank send to native packet")
} else if ackResponse.Status == icacallbackstypes.AckResponseStatus_FAILURE {
5 changes: 3 additions & 2 deletions x/millions/keeper/callbacks_claim.go
@@ -39,8 +39,9 @@ func ClaimCallback(k Keeper, ctx sdk.Context, packet channeltypes.Packet, ackRes
return errorsmod.Wrapf(types.ErrUnmarshalFailure, fmt.Sprintf("Unable to unmarshal claim callback args: %s", err.Error()))
}

// If the response status is a timeout, that's not an "error" since the relayer will retry then fail or succeed.
// We just log it out and return no error
// Scenarios:
// - Timeout: Does nothing, handled when restoring the ICA channel
// - Error: Put entity in error state to allow users to retry
if ackResponse.Status == icacallbackstypes.AckResponseStatus_TIMEOUT {
k.Logger(ctx).Debug("Received timeout for a claim packet")
} else if ackResponse.Status == icacallbackstypes.AckResponseStatus_FAILURE {
5 changes: 3 additions & 2 deletions x/millions/keeper/callbacks_delegate.go
@@ -39,8 +39,9 @@ func DelegateCallback(k Keeper, ctx sdk.Context, packet channeltypes.Packet, ack
return errorsmod.Wrapf(types.ErrUnmarshalFailure, fmt.Sprintf("Unable to unmarshal delegate callback args: %s", err.Error()))
}

// If the response status is a timeout, that's not an "error" since the relayer will retry then fail or succeed.
// We just log it out and return no error
// Scenarios:
// - Timeout: Does nothing, handled when restoring the ICA channel
// - Error: Put entity in error state to allow users to retry
if ackResponse.Status == icacallbackstypes.AckResponseStatus_TIMEOUT {
k.Logger(ctx).Debug("Received timeout for a delegate packet")
} else if ackResponse.Status == icacallbackstypes.AckResponseStatus_FAILURE {
6 changes: 4 additions & 2 deletions x/millions/keeper/callbacks_redelegate.go
@@ -45,10 +45,12 @@ func RedelegateCallback(k Keeper, ctx sdk.Context, packet channeltypes.Packet, a
return err
}

// If the response status is a timeout, that's not an "error" since the relayer will retry then fail or succeed.
// We just log it out and return no error
// Scenarios:
// - Timeout: Treated as an error (see below)
// - Error: Revert Pool validator set known split
if ackResponse.Status == icacallbackstypes.AckResponseStatus_TIMEOUT {
k.Logger(ctx).Debug("Received timeout for a redelegate packet")
return k.OnRedelegateToActiveValidatorsOnRemoteZoneCompleted(ctx, redelegateCallback.GetPoolId(), redelegateCallback.GetOperatorAddress(), redelegateCallback.GetSplitDelegations(), true)
} else if ackResponse.Status == icacallbackstypes.AckResponseStatus_FAILURE {
k.Logger(ctx).Debug("Received failure for a redelegate packet")
return k.OnRedelegateToActiveValidatorsOnRemoteZoneCompleted(ctx, redelegateCallback.GetPoolId(), redelegateCallback.GetOperatorAddress(), redelegateCallback.GetSplitDelegations(), true)
6 changes: 4 additions & 2 deletions x/millions/keeper/callbacks_set_withdraw_address.go
@@ -45,8 +45,10 @@ func SetWithdrawAddressCallback(k Keeper, ctx sdk.Context, packet channeltypes.P
return err
}

// If the response status is a timeout, that's not an "error" since the relayer will retry then fail or succeed.
// We just log it out and return no error
// Scenarios:
// This operation is done right after the ICA channel open procedure
// - Timeout: Does nothing - fix may be needed manually
// - Error: Does nothing - fix may be needed manually
if ackResponse.Status == icacallbackstypes.AckResponseStatus_TIMEOUT {
k.Logger(ctx).Debug("Received timeout for a set withdraw address packet")
} else if ackResponse.Status == icacallbackstypes.AckResponseStatus_FAILURE {
5 changes: 3 additions & 2 deletions x/millions/keeper/callbacks_transfer_from_native.go
@@ -45,8 +45,9 @@ func TransferFromNativeCallback(k Keeper, ctx sdk.Context, packet channeltypes.P
return err
}

// If the response status is a timeout, that's not an "error" since the relayer will retry then fail or succeed.
// We just log it out and return no error
// Scenarios:
// - Timeout: Does nothing, handled when restoring the ICA channel
// - Error: Put entity in error state to allow users to retry
if ackResponse.Status == icacallbackstypes.AckResponseStatus_TIMEOUT {
k.Logger(ctx).Debug("Received timeout for a transfer from native packet")
} else if ackResponse.Status == icacallbackstypes.AckResponseStatus_FAILURE {
6 changes: 4 additions & 2 deletions x/millions/keeper/callbacks_transfer_to_native.go
@@ -39,10 +39,12 @@ func TransferToNativeCallback(k Keeper, ctx sdk.Context, packet channeltypes.Pac
return errorsmod.Wrapf(types.ErrUnmarshalFailure, fmt.Sprintf("Unable to unmarshal transfer to native callback args: %s", err.Error()))
}

// If the response status is a timeout, that's not an "error" since the relayer will retry then fail or succeed.
// We just log it out and return no error
// Scenarios:
// - Timeout: Treated as an error (see below)
// - Error: Put entity in error state to allow users to retry
if ackResponse.Status == icacallbackstypes.AckResponseStatus_TIMEOUT {
k.Logger(ctx).Debug("Received timeout for a transfer to native packet")
return k.OnTransferDepositToRemoteZoneCompleted(ctx, transferCallback.GetPoolId(), transferCallback.GetDepositId(), true)
} else if ackResponse.Status == icacallbackstypes.AckResponseStatus_FAILURE {
k.Logger(ctx).Debug("Received failure for a transfer to native packet")
return k.OnTransferDepositToRemoteZoneCompleted(ctx, transferCallback.GetPoolId(), transferCallback.GetDepositId(), true)
6 changes: 3 additions & 3 deletions x/millions/keeper/callbacks_undelegate.go
@@ -69,13 +69,13 @@ func UndelegateCallback(k Keeper, ctx sdk.Context, packet channeltypes.Packet, a
return err
}

// If the response status is a timeout, that's not an "error" since the relayer will retry then fail or succeed.
// We just log it out and return no error
// Scenarios:
// - Timeout: Does nothing, handled when restoring the ICA channel
// - Error: Put back entities in the queue automatically
if ackResponse.Status == icacallbackstypes.AckResponseStatus_TIMEOUT {
k.Logger(ctx).Debug("Received timeout for an undelegate packet")
} else if ackResponse.Status == icacallbackstypes.AckResponseStatus_FAILURE {
k.Logger(ctx).Debug("Received failure for an undelegate packet")
// Failed OnUndelegateEpochUnbondingOnRemoteZoneCompleted
return k.OnUndelegateWithdrawalsOnRemoteZoneCompleted(
ctx,
undelegateCallback.PoolId,
9 changes: 9 additions & 0 deletions x/millions/keeper/keeper_deposit.go
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,15 @@ func (k Keeper) OnDelegateDepositOnRemoteZoneCompleted(ctx sdk.Context, poolID u
pool.ApplySplitDelegate(ctx, splits)
k.updatePool(ctx, &pool)
k.UpdateDepositStatus(ctx, poolID, depositID, types.DepositState_Success, false)

if pool.State == types.PoolState_Closing {
// Continue the closing procedure
// Errors are voluntarily ignored so a failure cannot block deposit completion
if err := k.ClosePool(ctx, poolID); err != nil {
k.Logger(ctx).With("ctx", "deposit_completed", "pool_id", poolID).Error("Silently failed to continue close pool procedure", "error", err)
}
}
}

return nil
}
