diff --git a/app/ante/ante.go b/app/ante/ante.go index 35a0b468a..e0b86cafb 100644 --- a/app/ante/ante.go +++ b/app/ante/ante.go @@ -9,7 +9,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ante "github.com/cosmos/cosmos-sdk/x/auth/ante" "github.com/cosmos/cosmos-sdk/x/auth/signing" - tfmwKeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" + tfmwKeeper "github.com/notional-labs/centauri/v4/x/transfermiddleware/keeper" ) // Link to default ante handler used by cosmos sdk: diff --git a/app/ante/ibc_ante.go b/app/ante/ibc_ante.go index a9a8c3edc..ebae1b5b7 100644 --- a/app/ante/ibc_ante.go +++ b/app/ante/ibc_ante.go @@ -9,7 +9,7 @@ import ( "github.com/cosmos/cosmos-sdk/x/authz" clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" - tfmwKeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" + tfmwKeeper "github.com/notional-labs/centauri/v4/x/transfermiddleware/keeper" ) type IBCPermissionDecorator struct { diff --git a/app/app.go b/app/app.go index feebeaccc..e46157207 100644 --- a/app/app.go +++ b/app/app.go @@ -11,7 +11,6 @@ import ( authante "github.com/cosmos/cosmos-sdk/x/auth/ante" authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation" "github.com/cosmos/cosmos-sdk/x/consensus" - porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types" tendermint "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint" wasm08 "github.com/cosmos/ibc-go/v7/modules/light-clients/08-wasm" wasm08types "github.com/cosmos/ibc-go/v7/modules/light-clients/08-wasm/types" @@ -27,13 +26,15 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" "github.com/cosmos/cosmos-sdk/x/auth" - authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" "github.com/cosmos/cosmos-sdk/x/auth/vesting" vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" "github.com/cosmos/cosmos-sdk/x/bank" - bech32stakingmigration "github.com/notional-labs/centauri/v3/bech32-migration/staking" + bech32stakingmigration "github.com/notional-labs/centauri/v4/bech32-migration/staking" + + "github.com/notional-labs/centauri/v4/app/keepers" + v4 "github.com/notional-labs/centauri/v4/app/upgrades/v4" // bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" @@ -41,28 +42,20 @@ import ( capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" "github.com/cosmos/cosmos-sdk/x/crisis" - crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper" crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" distr "github.com/cosmos/cosmos-sdk/x/distribution" - distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types" "github.com/cosmos/cosmos-sdk/x/evidence" - evidencekeeper "github.com/cosmos/cosmos-sdk/x/evidence/keeper" evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types" "github.com/cosmos/cosmos-sdk/x/feegrant" - feegrantkeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper" feegrantmodule "github.com/cosmos/cosmos-sdk/x/feegrant/module" "github.com/cosmos/cosmos-sdk/x/genutil" genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" "github.com/cosmos/cosmos-sdk/x/gov" govclient "github.com/cosmos/cosmos-sdk/x/gov/client" - govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" govtypes 
"github.com/cosmos/cosmos-sdk/x/gov/types" - govtypesv1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" - govtypesv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" "github.com/cosmos/cosmos-sdk/x/group" - groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper" groupmodule "github.com/cosmos/cosmos-sdk/x/group/module" dbm "github.com/cometbft/cometbft-db" @@ -72,72 +65,62 @@ import ( tmos "github.com/cometbft/cometbft/libs/os" "github.com/cosmos/cosmos-sdk/x/params" paramsclient "github.com/cosmos/cosmos-sdk/x/params/client" - paramskeeper "github.com/cosmos/cosmos-sdk/x/params/keeper" paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" - paramproposal "github.com/cosmos/cosmos-sdk/x/params/types/proposal" "github.com/cosmos/cosmos-sdk/x/slashing" - slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" "github.com/cosmos/cosmos-sdk/x/staking" - stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" "github.com/cosmos/cosmos-sdk/x/upgrade" upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client" - upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + ica "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts" + icatypes "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts/types" "github.com/cosmos/ibc-go/v7/modules/apps/transfer" ibctransferkeeper "github.com/cosmos/ibc-go/v7/modules/apps/transfer/keeper" ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" ibc "github.com/cosmos/ibc-go/v7/modules/core" - ibcclient "github.com/cosmos/ibc-go/v7/modules/core/02-client" ibcclientclient "github.com/cosmos/ibc-go/v7/modules/core/02-client/client" - ibcclienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" ibchost "github.com/cosmos/ibc-go/v7/modules/core/exported" ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper" "github.com/spf13/cast" icq "github.com/strangelove-ventures/async-icq/v7" - icqkeeper "github.com/strangelove-ventures/async-icq/v7/keeper" icqtypes "github.com/strangelove-ventures/async-icq/v7/types" - reward "github.com/notional-labs/centauri/v3/app/upgrade/reward" - custombankmodule "github.com/notional-labs/centauri/v3/custom/bank" - custombankkeeper "github.com/notional-labs/centauri/v3/custom/bank/keeper" + custombankmodule "github.com/notional-labs/centauri/v4/custom/bank" "github.com/strangelove-ventures/packet-forward-middleware/v7/router" - routerkeeper "github.com/strangelove-ventures/packet-forward-middleware/v7/router/keeper" routertypes "github.com/strangelove-ventures/packet-forward-middleware/v7/router/types" alliancemodule "github.com/terra-money/alliance/x/alliance" alliancemoduleclient "github.com/terra-money/alliance/x/alliance/client" - alliancemodulekeeper "github.com/terra-money/alliance/x/alliance/keeper" alliancemoduletypes "github.com/terra-money/alliance/x/alliance/types" - "github.com/notional-labs/centauri/v3/app/ante" - transfermiddleware "github.com/notional-labs/centauri/v3/x/transfermiddleware" - transfermiddlewarekeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" - transfermiddlewaretypes "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + "github.com/notional-labs/centauri/v4/app/ante" + transfermiddleware "github.com/notional-labs/centauri/v4/x/transfermiddleware" + transfermiddlewaretypes 
"github.com/notional-labs/centauri/v4/x/transfermiddleware/types" + + ratelimitmodule "github.com/notional-labs/centauri/v4/x/ratelimit" + ratelimitmoduletypes "github.com/notional-labs/centauri/v4/x/ratelimit/types" - consensusparamkeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper" consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types" - "github.com/notional-labs/centauri/v3/x/mint" - mintkeeper "github.com/notional-labs/centauri/v3/x/mint/keeper" - minttypes "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint" + minttypes "github.com/notional-labs/centauri/v4/x/mint/types" - // this line is used by starport scaffolding # stargate/app/moduleImport - storetypes "github.com/cosmos/cosmos-sdk/store/types" - wasm08Keeper "github.com/cosmos/ibc-go/v7/modules/light-clients/08-wasm/keeper" ibctestingtypes "github.com/cosmos/ibc-go/v7/testing/types" + ibc_hooks "github.com/notional-labs/centauri/v4/x/ibc-hooks" + ibchookstypes "github.com/notional-labs/centauri/v4/x/ibc-hooks/types" + "github.com/CosmWasm/wasmd/x/wasm" wasmkeeper "github.com/CosmWasm/wasmd/x/wasm/keeper" wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + + upgrades "github.com/notional-labs/centauri/v4/app/upgrades" ) const ( - AccountAddressPrefix = "centauri" - Name = "centauri" - dirName = "banksy" - authorityAddress = "centauri10556m38z4x6pqalr9rl5ytf3cff8q46nk85k9m" - ForkHeight = 244008 + Name = "centauri" + dirName = "banksy" + ForkHeight = 244008 ) var ( @@ -148,6 +131,8 @@ var ( // of "EnableAllProposals" (takes precedence over ProposalsEnabled) // https://github.com/CosmWasm/wasmd/blob/02a54d33ff2c064f3539ae12d75d027d9c665f05/x/wasm/internal/types/proposal.go#L28-L34 EnableSpecificProposals = "" + + Upgrades = []upgrades.Upgrade{v4.Upgrade} ) // GetEnabledProposals parses the ProposalsEnabled / EnableSpecificProposals values to @@ -219,7 +204,10 @@ var ( wasm08.AppModuleBasic{}, wasm.AppModuleBasic{}, router.AppModuleBasic{}, + ica.AppModuleBasic{}, + ibc_hooks.AppModuleBasic{}, transfermiddleware.AppModuleBasic{}, + ratelimitmodule.AppModuleBasic{}, consensus.AppModuleBasic{}, alliancemodule.AppModuleBasic{}, // this line is used by starport scaffolding # stargate/app/moduleBasic @@ -238,6 +226,7 @@ var ( ibctransfertypes.ModuleName: {authtypes.Minter, authtypes.Burner}, alliancemoduletypes.ModuleName: {authtypes.Minter, authtypes.Burner}, alliancemoduletypes.RewardsPoolName: nil, + icatypes.ModuleName: nil, // this line is used by starport scaffolding # stargate/app/maccPerms } ) @@ -260,6 +249,7 @@ func init() { // capabilities aren't needed for testing. 
type CentauriApp struct { *baseapp.BaseApp + keepers.AppKeepers cdc *codec.LegacyAmino appCodec codec.Codec @@ -267,41 +257,6 @@ type CentauriApp struct { invCheckPeriod uint - // keys to access the substores - keys map[string]*storetypes.KVStoreKey - tkeys map[string]*storetypes.TransientStoreKey - memKeys map[string]*storetypes.MemoryStoreKey - - // keepers - AccountKeeper authkeeper.AccountKeeper - BankKeeper custombankkeeper.Keeper - CapabilityKeeper *capabilitykeeper.Keeper - StakingKeeper *stakingkeeper.Keeper - SlashingKeeper slashingkeeper.Keeper - MintKeeper mintkeeper.Keeper - DistrKeeper distrkeeper.Keeper - GovKeeper govkeeper.Keeper - CrisisKeeper *crisiskeeper.Keeper - UpgradeKeeper *upgradekeeper.Keeper - ParamsKeeper paramskeeper.Keeper - IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly - EvidenceKeeper evidencekeeper.Keeper - TransferKeeper ibctransferkeeper.Keeper - ICQKeeper icqkeeper.Keeper - FeeGrantKeeper feegrantkeeper.Keeper - GroupKeeper groupkeeper.Keeper - Wasm08Keeper wasm08Keeper.Keeper // TODO: use this name ? - WasmKeeper wasm.Keeper - // make scoped keepers public for test purposes - ScopedIBCKeeper capabilitykeeper.ScopedKeeper - ScopedTransferKeeper capabilitykeeper.ScopedKeeper - ScopedWasmKeeper capabilitykeeper.ScopedKeeper - ConsensusParamsKeeper consensusparamkeeper.Keeper - // this line is used by starport scaffolding # stargate/app/keeperDeclaration - TransferMiddlewareKeeper transfermiddlewarekeeper.Keeper - RouterKeeper *routerkeeper.Keeper - AllianceKeeper alliancemodulekeeper.Keeper - mm *module.Manager sm *module.SimulationManager configurator module.Configurator @@ -332,242 +287,44 @@ func NewCentauriApp( bApp.SetInterfaceRegistry(interfaceRegistry) bApp.SetTxEncoder(encodingConfig.TxConfig.TxEncoder()) - keys := sdk.NewKVStoreKeys( - authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, - govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey, - evidencetypes.StoreKey, ibctransfertypes.StoreKey, icqtypes.StoreKey, capabilitytypes.StoreKey, consensusparamtypes.StoreKey, wasm08types.StoreKey, - crisistypes.StoreKey, routertypes.StoreKey, transfermiddlewaretypes.StoreKey, group.StoreKey, minttypes.StoreKey, alliancemoduletypes.StoreKey, wasm.StoreKey, - // this line is used by starport scaffolding # stargate/app/storeKey - ) - tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey) - memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey) - app := &CentauriApp{ BaseApp: bApp, + AppKeepers: keepers.AppKeepers{}, cdc: cdc, appCodec: appCodec, interfaceRegistry: interfaceRegistry, invCheckPeriod: invCheckPeriod, - keys: keys, - tkeys: tkeys, - memKeys: memKeys, } - app.ParamsKeeper = initParamsKeeper(appCodec, cdc, keys[paramstypes.StoreKey], tkeys[paramstypes.TStoreKey]) - - // set the BaseApp's parameter store - app.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, keys[consensusparamtypes.StoreKey], authtypes.NewModuleAddress(govtypes.ModuleName).String()) - bApp.SetParamStore(&app.ConsensusParamsKeeper) - - // add capability keeper and ScopeToModule for ibc module - app.CapabilityKeeper = capabilitykeeper.NewKeeper(appCodec, keys[capabilitytypes.StoreKey], memKeys[capabilitytypes.MemStoreKey]) - - // grant capabilities for the ibc and ibc-transfer modules - scopedIBCKeeper := app.CapabilityKeeper.ScopeToModule(ibchost.ModuleName) - scopedTransferKeeper := 
app.CapabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName) - scopedICQKeeper := app.CapabilityKeeper.ScopeToModule(icqtypes.ModuleName) - scopedWasmKeeper := app.CapabilityKeeper.ScopeToModule(wasm.ModuleName) - // this line is used by starport scaffolding # stargate/app/scopedKeeper - - // add keepers - app.AccountKeeper = authkeeper.NewAccountKeeper( - appCodec, keys[authtypes.StoreKey], authtypes.ProtoBaseAccount, maccPerms, AccountAddressPrefix, authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) - - app.BankKeeper = custombankkeeper.NewBaseKeeper( - appCodec, keys[banktypes.StoreKey], app.AccountKeeper, app.BlacklistedModuleAccountAddrs(), &app.TransferMiddlewareKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) - app.StakingKeeper = stakingkeeper.NewKeeper( - appCodec, keys[stakingtypes.StoreKey], app.AccountKeeper, app.BankKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) - - app.MintKeeper = mintkeeper.NewKeeper( - appCodec, keys[minttypes.StoreKey], app.StakingKeeper, - app.AccountKeeper, app.BankKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) - - app.DistrKeeper = distrkeeper.NewKeeper( - appCodec, keys[distrtypes.StoreKey], app.AccountKeeper, app.BankKeeper, - app.StakingKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) - app.SlashingKeeper = slashingkeeper.NewKeeper( - appCodec, encodingConfig.Amino, keys[slashingtypes.StoreKey], app.StakingKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) - app.CrisisKeeper = crisiskeeper.NewKeeper(appCodec, keys[crisistypes.StoreKey], - invCheckPeriod, app.BankKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) - - groupConfig := group.DefaultConfig() - /* - Example of setting group params: - groupConfig.MaxMetadataLen = 1000 - */ - app.GroupKeeper = groupkeeper.NewKeeper( - keys[group.StoreKey], + app.InitSpecialKeepers( appCodec, - app.MsgServiceRouter(), - app.AccountKeeper, - groupConfig, + cdc, + bApp, + invCheckPeriod, + skipUpgradeHeights, + homePath, ) - - app.FeeGrantKeeper = feegrantkeeper.NewKeeper(appCodec, keys[feegrant.StoreKey], app.AccountKeeper) - app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, keys[upgradetypes.StoreKey], appCodec, homePath, app.BaseApp, authtypes.NewModuleAddress(govtypes.ModuleName).String()) - - app.AllianceKeeper = alliancemodulekeeper.NewKeeper( + app.setupUpgradeStoreLoaders() + app.InitNormalKeepers( appCodec, - keys[alliancemoduletypes.StoreKey], - app.GetSubspace(alliancemoduletypes.ModuleName), - app.AccountKeeper, - app.BankKeeper, - app.StakingKeeper, - app.DistrKeeper, - ) - - app.BankKeeper.RegisterKeepers(app.AllianceKeeper, app.StakingKeeper) - // register the staking hooks - // NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks - app.StakingKeeper.SetHooks( - stakingtypes.NewMultiStakingHooks(app.DistrKeeper.Hooks(), app.SlashingKeeper.Hooks(), app.AllianceKeeper.StakingHooks()), - ) - - // ... 
other modules keepers - - // Create IBC Keeper - app.IBCKeeper = ibckeeper.NewKeeper( - appCodec, keys[ibchost.StoreKey], app.GetSubspace(ibchost.ModuleName), app.StakingKeeper, app.UpgradeKeeper, scopedIBCKeeper, - ) - - app.Wasm08Keeper = wasm08Keeper.NewKeeper(appCodec, app.keys[wasmtypes.StoreKey], authorityAddress, homePath) - // Create Transfer Keepers - app.TransferMiddlewareKeeper = transfermiddlewarekeeper.NewKeeper( - keys[transfermiddlewaretypes.StoreKey], - appCodec, - app.IBCKeeper.ChannelKeeper, - &app.TransferKeeper, - app.BankKeeper, - authorityAddress, - ) - - app.TransferKeeper = ibctransferkeeper.NewKeeper( - appCodec, keys[ibctransfertypes.StoreKey], - app.GetSubspace(ibctransfertypes.ModuleName), - &app.TransferMiddlewareKeeper, // ICS4Wrapper - app.IBCKeeper.ChannelKeeper, - &app.IBCKeeper.PortKeeper, - app.AccountKeeper, - app.BankKeeper, - scopedTransferKeeper, - ) - - app.RouterKeeper = routerkeeper.NewKeeper( - appCodec, - app.keys[routertypes.StoreKey], - app.GetSubspace(routertypes.ModuleName), - app.TransferKeeper, - app.IBCKeeper.ChannelKeeper, - &app.DistrKeeper, - app.BankKeeper, - app.TransferMiddlewareKeeper, - app.IBCKeeper.ChannelKeeper, + cdc, + bApp, + maccPerms, + invCheckPeriod, + skipUpgradeHeights, + homePath, + appOpts, + wasmOpts, + enabledProposals, ) transferModule := transfer.NewAppModule(app.TransferKeeper) - transferIBCModule := transfer.NewIBCModule(app.TransferKeeper) routerModule := router.NewAppModule(app.RouterKeeper) transfermiddlewareModule := transfermiddleware.NewAppModule(&app.TransferMiddlewareKeeper) - - app.ICQKeeper = icqkeeper.NewKeeper( - appCodec, keys[icqtypes.StoreKey], app.GetSubspace(icqtypes.ModuleName), - app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ChannelKeeper, &app.IBCKeeper.PortKeeper, - scopedICQKeeper, app.BaseApp, - ) + ratelimitModule := ratelimitmodule.NewAppModule(&app.RatelimitKeeper) icqModule := icq.NewAppModule(app.ICQKeeper) - icqIBCModule := icq.NewIBCModule(app.ICQKeeper) - transfermiddlewareStack := transfermiddleware.NewIBCMiddleware( - transferIBCModule, - app.TransferMiddlewareKeeper, - ) - - ibcMiddlewareStack := router.NewIBCMiddleware( - transfermiddlewareStack, - app.RouterKeeper, - 0, - routerkeeper.DefaultForwardTransferPacketTimeoutTimestamp, - routerkeeper.DefaultRefundTransferPacketTimeoutTimestamp, - ) - - // Create evidence Keeper for to register the IBC light client misbehaviour evidence route - evidenceKeeper := evidencekeeper.NewKeeper( - appCodec, keys[evidencetypes.StoreKey], app.StakingKeeper, app.SlashingKeeper, - ) - // If evidence needs to be handled for the app, set routes in router here and seal - app.EvidenceKeeper = *evidenceKeeper - - wasmDir := filepath.Join(homePath, "wasm") - wasmConfig, err := wasm.ReadWasmConfig(appOpts) - if err != nil { - panic(fmt.Sprintf("error while reading wasm config: %s", err)) - } - - // The last arguments can contain custom message handlers, and custom query handlers, - // if we want to allow any custom callbacks - availableCapabilities := strings.Join(AllCapabilities(), ",") - app.WasmKeeper = wasm.NewKeeper( - appCodec, - keys[wasm.StoreKey], - app.AccountKeeper, - app.BankKeeper, - app.StakingKeeper, - distrkeeper.NewQuerier(app.DistrKeeper), - app.IBCKeeper.ChannelKeeper, // ISC4 Wrapper: fee IBC middleware - app.IBCKeeper.ChannelKeeper, - &app.IBCKeeper.PortKeeper, - scopedWasmKeeper, - app.TransferKeeper, - app.MsgServiceRouter(), - app.GRPCQueryRouter(), - wasmDir, - wasmConfig, - availableCapabilities, - 
authtypes.NewModuleAddress(govtypes.ModuleName).String(), - wasmOpts..., - ) - - // Register Gov (must be registered after stakeibc) - govRouter := govtypesv1beta1.NewRouter() - govRouter.AddRoute(govtypes.RouterKey, govtypesv1beta1.ProposalHandler). - AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.ParamsKeeper)). - // AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.DistrKeeper)). - AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.UpgradeKeeper)). - AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(app.IBCKeeper.ClientKeeper)). - AddRoute(alliancemoduletypes.RouterKey, alliancemodule.NewAllianceProposalHandler(app.AllianceKeeper)) - - // The gov proposal types can be individually enabled - if len(enabledProposals) != 0 { - govRouter.AddRoute(wasm.RouterKey, wasm.NewWasmProposalHandler(app.WasmKeeper, enabledProposals)) - } - - govKeeper := *govkeeper.NewKeeper( - appCodec, keys[govtypes.StoreKey], app.AccountKeeper, app.BankKeeper, - app.StakingKeeper, app.MsgServiceRouter(), govtypes.DefaultConfig(), authtypes.NewModuleAddress(govtypes.ModuleName).String(), - ) - - govKeeper.SetLegacyRouter(govRouter) - - app.GovKeeper = *govKeeper.SetHooks( - govtypes.NewMultiGovHooks( - // register the governance hooks - ), - ) - - ibcRouter := porttypes.NewRouter() - ibcRouter.AddRoute(ibctransfertypes.ModuleName, ibcMiddlewareStack) - ibcRouter.AddRoute(icqtypes.ModuleName, icqIBCModule) - ibcRouter.AddRoute(wasm.ModuleName, wasm.NewIBCHandler(app.WasmKeeper, app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ChannelKeeper)) - - // this line is used by starport scaffolding # ibc/app/router - app.IBCKeeper.SetRouter(ibcRouter) - + ibcHooksModule := ibc_hooks.NewAppModule() + icaModule := ica.NewAppModule(nil, &app.ICAHostKeeper) // Only ICA Host /**** Module Options ****/ // NOTE: we may consider parsing `appOpts` inside module constructors. 
For the moment @@ -600,11 +357,14 @@ func NewCentauriApp( params.NewAppModule(app.ParamsKeeper), transferModule, icqModule, + ibcHooksModule, consensus.NewAppModule(appCodec, app.ConsensusParamsKeeper), wasm08.NewAppModule(app.Wasm08Keeper), wasm.NewAppModule(appCodec, &app.WasmKeeper, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, app.MsgServiceRouter(), app.GetSubspace(wasmtypes.ModuleName)), routerModule, transfermiddlewareModule, + icaModule, + ratelimitModule, alliancemodule.NewAppModule(appCodec, app.AllianceKeeper, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), // this line is used by starport scaffolding # stargate/app/appModule ) @@ -626,6 +386,8 @@ func NewCentauriApp( ibctransfertypes.ModuleName, routertypes.ModuleName, transfermiddlewaretypes.ModuleName, + ratelimitmoduletypes.ModuleName, + ibchookstypes.ModuleName, icqtypes.ModuleName, authtypes.ModuleName, banktypes.ModuleName, @@ -637,6 +399,7 @@ func NewCentauriApp( paramstypes.ModuleName, consensusparamtypes.ModuleName, wasm08types.ModuleName, + icatypes.ModuleName, wasm.ModuleName, alliancemoduletypes.ModuleName, // this line is used by starport scaffolding # stargate/app/beginBlockers @@ -662,10 +425,13 @@ func NewCentauriApp( ibchost.ModuleName, routertypes.ModuleName, transfermiddlewaretypes.ModuleName, + ratelimitmoduletypes.ModuleName, + ibchookstypes.ModuleName, ibctransfertypes.ModuleName, icqtypes.ModuleName, consensusparamtypes.ModuleName, wasm08types.ModuleName, + icatypes.ModuleName, wasm.ModuleName, alliancemoduletypes.ModuleName, ) @@ -695,10 +461,13 @@ func NewCentauriApp( icqtypes.ModuleName, routertypes.ModuleName, transfermiddlewaretypes.ModuleName, + ratelimitmoduletypes.ModuleName, + ibchookstypes.ModuleName, feegrant.ModuleName, group.ModuleName, consensusparamtypes.ModuleName, wasm08types.ModuleName, + icatypes.ModuleName, wasm.ModuleName, alliancemoduletypes.ModuleName, // this line is used by starport scaffolding # stargate/app/initGenesis @@ -708,6 +477,8 @@ func NewCentauriApp( app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter()) app.mm.RegisterServices(app.configurator) + app.setupUpgradeHandlers() + // create the simulation manager and define the order of the modules for deterministic simulations // app.sm = module.NewSimulationManager( // auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts), @@ -734,9 +505,9 @@ func NewCentauriApp( // app.sm.RegisterStoreDecoders() // initialize stores - app.MountKVStores(keys) - app.MountTransientStores(tkeys) - app.MountMemoryStores(memKeys) + app.MountKVStores(app.GetKVStoreKey()) + app.MountTransientStores(app.GetTransientStoreKey()) + app.MountMemoryStores(app.GetMemoryStoreKey()) // initialize BaseApp app.SetInitChainer(app.InitChainer) @@ -768,12 +539,6 @@ func NewCentauriApp( } } - app.ScopedIBCKeeper = scopedIBCKeeper - app.ScopedTransferKeeper = scopedTransferKeeper - app.ScopedWasmKeeper = scopedWasmKeeper - // app.ScopedMonitoringKeeper = scopedMonitoringKeeper - app.UpgradeKeeper.SetUpgradeHandler(reward.UpgradeName, reward.CreateUpgradeHandler(app.mm, app.configurator, app.TransferMiddlewareKeeper, app.MintKeeper)) - return app } @@ -812,7 +577,7 @@ func (app *CentauriApp) GetTxConfig() client.TxConfig { // BeginBlocker application updates every begin block func (app *CentauriApp) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock { if ctx.BlockHeight() == ForkHeight { - 
bech32stakingmigration.MigrateUnbonding(ctx, app.keys[stakingtypes.StoreKey], app.appCodec) + bech32stakingmigration.MigrateUnbonding(ctx, app.GetKey(stakingtypes.StoreKey), app.appCodec) } return app.mm.BeginBlock(ctx, req) @@ -849,16 +614,6 @@ func (app *CentauriApp) ModuleAccountAddrs() map[string]bool { return modAccAddrs } -// ModuleAccountAddrs returns all the app's module account addresses. -func (app *CentauriApp) BlacklistedModuleAccountAddrs() map[string]bool { - modAccAddrs := make(map[string]bool) - // DO NOT REMOVE: StringMapKeys fixes non-deterministic map iteration - for acc := range maccPerms { - modAccAddrs[authtypes.NewModuleAddress(acc).String()] = true - } - return modAccAddrs -} - // LegacyAmino returns SimApp's amino codec. // // NOTE: This is solely to be used for testing purposes as it may be desirable @@ -880,35 +635,6 @@ func (app *CentauriApp) InterfaceRegistry() types.InterfaceRegistry { return app.interfaceRegistry } -// GetKey returns the KVStoreKey for the provided store key. -// -// NOTE: This is solely to be used for testing purposes. -func (app *CentauriApp) GetKey(storeKey string) *storetypes.KVStoreKey { - return app.keys[storeKey] -} - -// GetTKey returns the TransientStoreKey for the provided store key. -// -// NOTE: This is solely to be used for testing purposes. -func (app *CentauriApp) GetTKey(storeKey string) *storetypes.TransientStoreKey { - return app.tkeys[storeKey] -} - -// GetMemKey returns the MemStoreKey for the provided mem key. -// -// NOTE: This is solely used for testing purposes. -func (app *CentauriApp) GetMemKey(storeKey string) *storetypes.MemoryStoreKey { - return app.memKeys[storeKey] -} - -// GetSubspace returns a param subspace for a given module name. -// -// NOTE: This is solely to be used for testing purposes. -func (app *CentauriApp) GetSubspace(moduleName string) paramstypes.Subspace { - subspace, _ := app.ParamsKeeper.GetSubspace(moduleName) - return subspace -} - // RegisterAPIRoutes registers all application module routes with the provided // API server. 
func (app *CentauriApp) RegisterAPIRoutes(apiSvr *api.Server, _ config.APIConfig) { @@ -947,29 +673,51 @@ func GetMaccPerms() map[string][]string { return dupMaccPerms } -// initParamsKeeper init params keeper and its subspaces -func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino, key, tkey storetypes.StoreKey) paramskeeper.Keeper { - paramsKeeper := paramskeeper.NewKeeper(appCodec, legacyAmino, key, tkey) - - paramsKeeper.Subspace(authtypes.ModuleName) - paramsKeeper.Subspace(banktypes.ModuleName) - paramsKeeper.Subspace(stakingtypes.ModuleName) - paramsKeeper.Subspace(distrtypes.ModuleName) - paramsKeeper.Subspace(slashingtypes.ModuleName) - paramsKeeper.Subspace(routertypes.ModuleName).WithKeyTable(routertypes.ParamKeyTable()) // TODO: - paramsKeeper.Subspace(govtypes.ModuleName).WithKeyTable(govtypesv1.ParamKeyTable()) //nolint:staticcheck - paramsKeeper.Subspace(minttypes.ModuleName).WithKeyTable(minttypes.ParamKeyTable()) - paramsKeeper.Subspace(crisistypes.ModuleName) - paramsKeeper.Subspace(ibctransfertypes.ModuleName) - paramsKeeper.Subspace(icqtypes.ModuleName) - paramsKeeper.Subspace(ibchost.ModuleName) - paramsKeeper.Subspace(alliancemoduletypes.ModuleName) - paramsKeeper.Subspace(wasm.ModuleName) - - return paramsKeeper -} - // SimulationManager implements the SimulationApp interface func (app *CentauriApp) SimulationManager() *module.SimulationManager { return app.sm } + +// configure store loader that checks if version == upgradeHeight and applies store upgrades +func (app *CentauriApp) setupUpgradeStoreLoaders() { + upgradeInfo, err := app.UpgradeKeeper.ReadUpgradeInfoFromDisk() + if err != nil { + panic(fmt.Sprintf("failed to read upgrade info from disk %s", err)) + } + + if app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) { + return + } + + currentHeight := app.CommitMultiStore().LastCommitID().Version + + if upgradeInfo.Height == currentHeight+1 { + app.customPreUpgradeHandler(upgradeInfo) + } + + for _, upgrade := range Upgrades { + if upgradeInfo.Name == upgrade.UpgradeName { + app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, &upgrade.StoreUpgrades)) + } + } +} + +func (app *CentauriApp) customPreUpgradeHandler(upgradeInfo upgradetypes.Plan) { + switch upgradeInfo.Name { + default: + } +} + +func (app *CentauriApp) setupUpgradeHandlers() { + for _, upgrade := range Upgrades { + app.UpgradeKeeper.SetUpgradeHandler( + upgrade.UpgradeName, + upgrade.CreateUpgradeHandler( + app.mm, + app.configurator, + app.BaseApp, + &app.AppKeepers, + ), + ) + } +} diff --git a/app/helpers/test_helpers.go b/app/helpers/test_helpers.go index 49d5ba954..5c5db303f 100644 --- a/app/helpers/test_helpers.go +++ b/app/helpers/test_helpers.go @@ -21,7 +21,7 @@ import ( authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" "github.com/cosmos/ibc-go/v7/testing/mock" - centauri "github.com/notional-labs/centauri/v3/app" + centauri "github.com/notional-labs/centauri/v4/app" "github.com/stretchr/testify/require" ) diff --git a/app/ibctesting/chain.go b/app/ibctesting/chain.go index 586d67821..c61d29598 100644 --- a/app/ibctesting/chain.go +++ b/app/ibctesting/chain.go @@ -2,10 +2,14 @@ package ibctesting import ( "bytes" + "crypto/sha256" "fmt" + "os" "testing" "time" + ratelimitmodulekeeper "github.com/notional-labs/centauri/v4/x/ratelimit/keeper" + "cosmossdk.io/errors" abci "github.com/cometbft/cometbft/abci/types" "github.com/cometbft/cometbft/crypto/tmhash" @@ -19,12 +23,20 @@ import ( 
"github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" sdk "github.com/cosmos/cosmos-sdk/types" + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + "github.com/CosmWasm/wasmd/x/wasm" + wasmkeeper "github.com/CosmWasm/wasmd/x/wasm/keeper" + wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" teststaking "github.com/cosmos/cosmos-sdk/x/staking/testutil" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" @@ -39,10 +51,11 @@ import ( ibctesting "github.com/cosmos/ibc-go/v7/testing" "github.com/cosmos/ibc-go/v7/testing/mock" ibctestingtypes "github.com/cosmos/ibc-go/v7/testing/types" - centauri "github.com/notional-labs/centauri/v3/app" - "github.com/notional-labs/centauri/v3/app/ibctesting/simapp" - routerKeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" + centauri "github.com/notional-labs/centauri/v4/app" + "github.com/notional-labs/centauri/v4/app/ibctesting/simapp" + routerKeeper "github.com/notional-labs/centauri/v4/x/transfermiddleware/keeper" "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" ) // TestChain is a testing struct that wraps a simapp with the last TM Header, the current ABCI @@ -111,7 +124,7 @@ func NewTestChain(t *testing.T, coord *Coordinator, chainID string) *TestChain { Coins: sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, amount)), } - app := NewTestingAppDecorator(t, centauri.SetupWithGenesisValSet(t, valSet, []authtypes.GenesisAccount{acc}, balance)) + app := NewTestingAppDecorator(t, centauri.SetupWithGenesisValSet(t, coord.CurrentTime.UTC(), valSet, []authtypes.GenesisAccount{acc}, balance)) // create current header and call begin block header := tmproto.Header{ @@ -605,6 +618,10 @@ func (chain *TestChain) TransferMiddleware() routerKeeper.Keeper { return chain.GetTestSupport().TransferMiddleware() } +func (chain *TestChain) RateLimit() ratelimitmodulekeeper.Keeper { + return chain.GetTestSupport().RateLimit() +} + func (chain *TestChain) Balance(acc sdk.AccAddress, denom string) sdk.Coin { return chain.GetTestSupport().BankKeeper().GetBalance(chain.GetContext(), acc, denom) } @@ -621,6 +638,73 @@ func (chain TestChain) GetTestSupport() *centauri.TestSupport { return chain.App.(*TestingAppDecorator).TestSupport() } +func (chain *TestChain) QueryContract(suite *suite.Suite, contract sdk.AccAddress, key []byte) string { + wasmKeeper := chain.GetTestSupport().WasmdKeeper() + state, err := wasmKeeper.QuerySmart(chain.GetContext(), contract, key) + suite.Require().NoError(err) + return string(state) +} + +func (chain *TestChain) StoreContractCode(suite *suite.Suite, path string) { + govModuleAddress := chain.GetTestSupport().AccountKeeper().GetModuleAddress(govtypes.ModuleName) + wasmCode, err := os.ReadFile(path) + suite.Require().NoError(err) + + src := wasmtypes.StoreCodeProposalFixture(func(p *wasmtypes.StoreCodeProposal) { + p.RunAs = govModuleAddress.String() + p.WASMByteCode 
= wasmCode + checksum := sha256.Sum256(wasmCode) + p.CodeHash = checksum[:] + }) + + govKeeper := chain.GetTestSupport().GovKeeper() + // when + mustSubmitAndExecuteLegacyProposal(suite.T(), chain.GetContext(), src, chain.SenderAccount.GetAddress().String(), &govKeeper, govModuleAddress.String()) + suite.Require().NoError(err) +} + +func (chain *TestChain) InstantiateContract(suite *suite.Suite, msg string, codeID uint64) sdk.AccAddress { + wasmKeeper := chain.GetTestSupport().WasmdKeeper() + govModuleAddress := chain.GetTestSupport().AccountKeeper().GetModuleAddress(govtypes.ModuleName) + + contractKeeper := wasmkeeper.NewDefaultPermissionKeeper(wasmKeeper) + addr, _, err := contractKeeper.Instantiate(chain.GetContext(), codeID, govModuleAddress, govModuleAddress, []byte(msg), "contract", nil) + suite.Require().NoError(err) + return addr +} + +func mustSubmitAndExecuteLegacyProposal(t *testing.T, ctx sdk.Context, content v1beta1.Content, myActorAddress string, govKeeper *govkeeper.Keeper, authority string) { + t.Helper() + msgServer := govkeeper.NewMsgServerImpl(govKeeper) + // ignore all submit events + contentMsg, err := submitLegacyProposal(t, ctx.WithEventManager(sdk.NewEventManager()), content, myActorAddress, authority, msgServer) + require.NoError(t, err) + + _, err = msgServer.ExecLegacyContent(sdk.WrapSDKContext(ctx), v1.NewMsgExecLegacyContent(contentMsg.Content, authority)) + require.NoError(t, err) +} + +// does not fail on submit proposal +func submitLegacyProposal(t *testing.T, ctx sdk.Context, content v1beta1.Content, myActorAddress string, govAuthority string, msgServer v1.MsgServer) (*v1.MsgExecLegacyContent, error) { + t.Helper() + contentMsg, err := v1.NewLegacyContent(content, govAuthority) + require.NoError(t, err) + + proposal, err := v1.NewMsgSubmitProposal( + []sdk.Msg{contentMsg}, + sdk.Coins{}, + myActorAddress, + "", + "my title", + "my description", + ) + require.NoError(t, err) + + // when stored + _, err = msgServer.SubmitProposal(sdk.WrapSDKContext(ctx), proposal) + return contentMsg, err +} + var _ ibctesting.TestingApp = TestingAppDecorator{} type TestingAppDecorator struct { @@ -641,6 +725,14 @@ func (a TestingAppDecorator) GetStakingKeeper() ibctestingtypes.StakingKeeper { return a.TestSupport().StakingKeeper() } +func (a TestingAppDecorator) GetAccountKeeper() authkeeper.AccountKeeper { + return a.TestSupport().AccountKeeper() +} + +func (a TestingAppDecorator) GetGovKeeper() govkeeper.Keeper { + return a.TestSupport().GovKeeper() +} + func (a TestingAppDecorator) GetBankKeeper() bankkeeper.Keeper { return a.TestSupport().BankKeeper() } @@ -661,6 +753,10 @@ func (a TestingAppDecorator) TestSupport() *centauri.TestSupport { return centauri.NewTestSupport(a.t, a.CentauriApp) } +func (a TestingAppDecorator) GetWasmdKeeper() wasm.Keeper { + return a.TestSupport().WasmdKeeper() +} + func (a TestingAppDecorator) GetWasmKeeper() wasm08.Keeper { return a.TestSupport().Wasm08Keeper() } diff --git a/app/ibctesting/event_utils.go b/app/ibctesting/event_utils.go index a801402c8..e910ab69e 100644 --- a/app/ibctesting/event_utils.go +++ b/app/ibctesting/event_utils.go @@ -1,10 +1,12 @@ package ibctesting import ( + "fmt" "strconv" "strings" abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" ) @@ -56,6 +58,64 @@ func parsePacketFromEvent(evt abci.Event) channeltypes.Packet { } } +// 
ParsePacketFromEvents parses events emitted from a MsgRecvPacket and returns the +// acknowledgement. +func ParsePacketFromEvents(events sdk.Events) (channeltypes.Packet, error) { + for _, ev := range events { + if ev.Type == channeltypes.EventTypeSendPacket { + packet := channeltypes.Packet{} + for _, attr := range ev.Attributes { + switch string(attr.Key) { + case channeltypes.AttributeKeyData: + packet.Data = []byte(attr.Value) + + case channeltypes.AttributeKeySequence: + seq, err := strconv.ParseUint(string(attr.Value), 10, 64) + if err != nil { + return channeltypes.Packet{}, err + } + + packet.Sequence = seq + + case channeltypes.AttributeKeySrcPort: + packet.SourcePort = string(attr.Value) + + case channeltypes.AttributeKeySrcChannel: + packet.SourceChannel = string(attr.Value) + + case channeltypes.AttributeKeyDstPort: + packet.DestinationPort = string(attr.Value) + + case channeltypes.AttributeKeyDstChannel: + packet.DestinationChannel = string(attr.Value) + + case channeltypes.AttributeKeyTimeoutHeight: + height, err := clienttypes.ParseHeight(string(attr.Value)) + if err != nil { + return channeltypes.Packet{}, err + } + + packet.TimeoutHeight = height + + case channeltypes.AttributeKeyTimeoutTimestamp: + timestamp, err := strconv.ParseUint(string(attr.Value), 10, 64) + if err != nil { + return channeltypes.Packet{}, err + } + + packet.TimeoutTimestamp = timestamp + + default: + continue + } + } + + return packet, nil + } + } + return channeltypes.Packet{}, fmt.Errorf("acknowledgement event attribute not found") +} + // return the value for the attribute with the given name func getField(evt abci.Event, key string) string { for _, attr := range evt.Attributes { diff --git a/app/ibctesting/simapp/app.go b/app/ibctesting/simapp/app.go index df44ea4f6..2ce90278c 100644 --- a/app/ibctesting/simapp/app.go +++ b/app/ibctesting/simapp/app.go @@ -90,9 +90,9 @@ import ( "github.com/gorilla/mux" // TODO: mint module not complete yet, - "github.com/notional-labs/centauri/v3/x/mint" - mintkeeper "github.com/notional-labs/centauri/v3/x/mint/keeper" - minttypes "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint" + mintkeeper "github.com/notional-labs/centauri/v4/x/mint/keeper" + minttypes "github.com/notional-labs/centauri/v4/x/mint/types" "github.com/rakyll/statik/fs" "github.com/spf13/cast" @@ -123,13 +123,13 @@ import ( wasm08 "github.com/cosmos/ibc-go/v7/modules/light-clients/08-wasm/keeper" ibcmock "github.com/cosmos/ibc-go/v7/testing/mock" ibctestingtypes "github.com/cosmos/ibc-go/v7/testing/types" - simappparams "github.com/notional-labs/centauri/v3/app/ibctesting/simapp/params" - simappupgrades "github.com/notional-labs/centauri/v3/app/ibctesting/simapp/upgrades" - v6 "github.com/notional-labs/centauri/v3/app/ibctesting/simapp/upgrades/v6" - v7 "github.com/notional-labs/centauri/v3/app/ibctesting/simapp/upgrades/v7" - transfermiddleware "github.com/notional-labs/centauri/v3/x/transfermiddleware" - transfermiddlewarekeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" - transfermiddlewaretypes "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + simappparams "github.com/notional-labs/centauri/v4/app/ibctesting/simapp/params" + simappupgrades "github.com/notional-labs/centauri/v4/app/ibctesting/simapp/upgrades" + v6 "github.com/notional-labs/centauri/v4/app/ibctesting/simapp/upgrades/v6" + v7 "github.com/notional-labs/centauri/v4/app/ibctesting/simapp/upgrades/v7" + transfermiddleware 
"github.com/notional-labs/centauri/v4/x/transfermiddleware" + transfermiddlewarekeeper "github.com/notional-labs/centauri/v4/x/transfermiddleware/keeper" + transfermiddlewaretypes "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" ) const appName = "SimApp" @@ -379,6 +379,7 @@ func NewSimApp( app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, keys[upgradetypes.StoreKey], appCodec, homePath, app.BaseApp, authtypes.NewModuleAddress(govtypes.ModuleName).String()) app.TransferMiddlewarekeeper = transfermiddlewarekeeper.NewKeeper( keys[transfermiddlewaretypes.StoreKey], + app.GetSubspace(transfermiddlewaretypes.ModuleName), appCodec, app.IBCKeeper.ChannelKeeper, app.TransferKeeper, diff --git a/app/ibctesting/simapp/encoding.go b/app/ibctesting/simapp/encoding.go index 9b487648e..fe48f7b94 100644 --- a/app/ibctesting/simapp/encoding.go +++ b/app/ibctesting/simapp/encoding.go @@ -3,7 +3,7 @@ package simapp import ( "github.com/cosmos/cosmos-sdk/std" - simappparams "github.com/notional-labs/centauri/v3/app/ibctesting/simapp/params" + simappparams "github.com/notional-labs/centauri/v4/app/ibctesting/simapp/params" ) // MakeTestEncodingConfig creates an EncodingConfig for testing. This function diff --git a/app/ibctesting/simapp/sim_test.go b/app/ibctesting/simapp/sim_test.go index d7aebdbd3..5c0a3ead5 100644 --- a/app/ibctesting/simapp/sim_test.go +++ b/app/ibctesting/simapp/sim_test.go @@ -28,7 +28,7 @@ import ( "github.com/cosmos/cosmos-sdk/x/simulation" slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - minttypes "github.com/notional-labs/centauri/v3/x/mint/types" + minttypes "github.com/notional-labs/centauri/v4/x/mint/types" "github.com/stretchr/testify/require" ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" diff --git a/app/keepers/keepers.go b/app/keepers/keepers.go new file mode 100644 index 000000000..1cd6ba5fb --- /dev/null +++ b/app/keepers/keepers.go @@ -0,0 +1,472 @@ +package keepers + +import ( + "fmt" + "path/filepath" + "strings" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types" + + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + + crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper" + crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" + + distrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" + distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + + evidencekeeper "github.com/cosmos/cosmos-sdk/x/evidence/keeper" + evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types" + + "github.com/cosmos/cosmos-sdk/x/feegrant" + feegrantkeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper" + + govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + govtypesv1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" + govtypesv1beta1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" + + "github.com/cosmos/cosmos-sdk/x/group" + groupkeeper "github.com/cosmos/cosmos-sdk/x/group/keeper" + + "github.com/cosmos/cosmos-sdk/x/params" + paramskeeper 
"github.com/cosmos/cosmos-sdk/x/params/keeper" + paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" + paramproposal "github.com/cosmos/cosmos-sdk/x/params/types/proposal" + + slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + + stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + "github.com/cosmos/cosmos-sdk/x/upgrade" + upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + icahost "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts/host" + icahostkeeper "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts/host/keeper" + icahosttypes "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts/host/types" + "github.com/cosmos/ibc-go/v7/modules/apps/transfer" + ibctransferkeeper "github.com/cosmos/ibc-go/v7/modules/apps/transfer/keeper" + ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + ibcclient "github.com/cosmos/ibc-go/v7/modules/core/02-client" + ibcclienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + ibchost "github.com/cosmos/ibc-go/v7/modules/core/exported" + ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper" + icq "github.com/strangelove-ventures/async-icq/v7" + icqkeeper "github.com/strangelove-ventures/async-icq/v7/keeper" + icqtypes "github.com/strangelove-ventures/async-icq/v7/types" + + custombankkeeper "github.com/notional-labs/centauri/v4/custom/bank/keeper" + + "github.com/strangelove-ventures/packet-forward-middleware/v7/router" + routerkeeper "github.com/strangelove-ventures/packet-forward-middleware/v7/router/keeper" + routertypes "github.com/strangelove-ventures/packet-forward-middleware/v7/router/types" + + alliancemodule "github.com/terra-money/alliance/x/alliance" + alliancemodulekeeper "github.com/terra-money/alliance/x/alliance/keeper" + alliancemoduletypes "github.com/terra-money/alliance/x/alliance/types" + + transfermiddleware "github.com/notional-labs/centauri/v4/x/transfermiddleware" + transfermiddlewarekeeper "github.com/notional-labs/centauri/v4/x/transfermiddleware/keeper" + transfermiddlewaretypes "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" + + ratelimitmodule "github.com/notional-labs/centauri/v4/x/ratelimit" + ratelimitmodulekeeper "github.com/notional-labs/centauri/v4/x/ratelimit/keeper" + ratelimitmoduletypes "github.com/notional-labs/centauri/v4/x/ratelimit/types" + + consensusparamkeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper" + consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types" + + mintkeeper "github.com/notional-labs/centauri/v4/x/mint/keeper" + minttypes "github.com/notional-labs/centauri/v4/x/mint/types" + + "github.com/CosmWasm/wasmd/x/wasm" + servertypes "github.com/cosmos/cosmos-sdk/server/types" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + wasm08Keeper "github.com/cosmos/ibc-go/v7/modules/light-clients/08-wasm/keeper" + wasmtypes "github.com/cosmos/ibc-go/v7/modules/light-clients/08-wasm/types" + + ibc_hooks "github.com/notional-labs/centauri/v4/x/ibc-hooks" + ibchookskeeper "github.com/notional-labs/centauri/v4/x/ibc-hooks/keeper" + ibchookstypes "github.com/notional-labs/centauri/v4/x/ibc-hooks/types" +) + +const ( + AccountAddressPrefix = "centauri" + authorityAddress = "centauri10556m38z4x6pqalr9rl5ytf3cff8q46nk85k9m" +) + +type AppKeepers struct { + // keys to 
access the substores + keys map[string]*storetypes.KVStoreKey + tkeys map[string]*storetypes.TransientStoreKey + memKeys map[string]*storetypes.MemoryStoreKey + + // keepers + AccountKeeper authkeeper.AccountKeeper + BankKeeper custombankkeeper.Keeper + CapabilityKeeper *capabilitykeeper.Keeper + StakingKeeper *stakingkeeper.Keeper + SlashingKeeper slashingkeeper.Keeper + MintKeeper mintkeeper.Keeper + DistrKeeper distrkeeper.Keeper + GovKeeper govkeeper.Keeper + CrisisKeeper *crisiskeeper.Keeper + UpgradeKeeper *upgradekeeper.Keeper + ParamsKeeper paramskeeper.Keeper + IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly + EvidenceKeeper evidencekeeper.Keeper + TransferKeeper ibctransferkeeper.Keeper + ICQKeeper icqkeeper.Keeper + ICAHostKeeper icahostkeeper.Keeper + FeeGrantKeeper feegrantkeeper.Keeper + GroupKeeper groupkeeper.Keeper + Wasm08Keeper wasm08Keeper.Keeper // TODO: use this name ? + WasmKeeper wasm.Keeper + IBCHooksKeeper *ibchookskeeper.Keeper + Ics20WasmHooks *ibc_hooks.WasmHooks + HooksICS4Wrapper ibc_hooks.ICS4Middleware + // make scoped keepers public for test purposes + ScopedIBCKeeper capabilitykeeper.ScopedKeeper + ScopedTransferKeeper capabilitykeeper.ScopedKeeper + ScopedWasmKeeper capabilitykeeper.ScopedKeeper + ScopedICAHostKeeper capabilitykeeper.ScopedKeeper + ScopedRateLimitKeeper capabilitykeeper.ScopedKeeper + ConsensusParamsKeeper consensusparamkeeper.Keeper + // this line is used by starport scaffolding # stargate/app/keeperDeclaration + TransferMiddlewareKeeper transfermiddlewarekeeper.Keeper + RouterKeeper *routerkeeper.Keeper + RatelimitKeeper ratelimitmodulekeeper.Keeper + AllianceKeeper alliancemodulekeeper.Keeper +} + +// InitNormalKeepers initializes all 'normal' keepers. 
+func (appKeepers *AppKeepers) InitNormalKeepers( + appCodec codec.Codec, + cdc *codec.LegacyAmino, + bApp *baseapp.BaseApp, + maccPerms map[string][]string, + invCheckPeriod uint, + skipUpgradeHeights map[int64]bool, + homePath string, + appOpts servertypes.AppOptions, + wasmOpts []wasm.Option, + enabledProposals []wasm.ProposalType, +) { + // add keepers + appKeepers.AccountKeeper = authkeeper.NewAccountKeeper( + appCodec, appKeepers.keys[authtypes.StoreKey], authtypes.ProtoBaseAccount, maccPerms, AccountAddressPrefix, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + + appKeepers.BankKeeper = custombankkeeper.NewBaseKeeper( + appCodec, appKeepers.keys[banktypes.StoreKey], appKeepers.AccountKeeper, appKeepers.BlacklistedModuleAccountAddrs(maccPerms), &appKeepers.TransferMiddlewareKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + appKeepers.StakingKeeper = stakingkeeper.NewKeeper( + appCodec, appKeepers.keys[stakingtypes.StoreKey], appKeepers.AccountKeeper, appKeepers.BankKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + + appKeepers.MintKeeper = mintkeeper.NewKeeper( + appCodec, appKeepers.keys[minttypes.StoreKey], appKeepers.StakingKeeper, + appKeepers.AccountKeeper, appKeepers.BankKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + + appKeepers.DistrKeeper = distrkeeper.NewKeeper( + appCodec, appKeepers.keys[distrtypes.StoreKey], appKeepers.AccountKeeper, appKeepers.BankKeeper, + appKeepers.StakingKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + appKeepers.SlashingKeeper = slashingkeeper.NewKeeper( + appCodec, cdc, appKeepers.keys[slashingtypes.StoreKey], appKeepers.StakingKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + + appKeepers.CrisisKeeper = crisiskeeper.NewKeeper(appCodec, appKeepers.keys[crisistypes.StoreKey], + invCheckPeriod, appKeepers.BankKeeper, authtypes.FeeCollectorName, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + + groupConfig := group.DefaultConfig() + /* + Example of setting group params: + groupConfig.MaxMetadataLen = 1000 + */ + appKeepers.GroupKeeper = groupkeeper.NewKeeper( + appKeepers.keys[group.StoreKey], + appCodec, + bApp.MsgServiceRouter(), + appKeepers.AccountKeeper, + groupConfig, + ) + + appKeepers.FeeGrantKeeper = feegrantkeeper.NewKeeper(appCodec, appKeepers.keys[feegrant.StoreKey], appKeepers.AccountKeeper) + appKeepers.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, appKeepers.keys[upgradetypes.StoreKey], appCodec, homePath, bApp, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + + appKeepers.AllianceKeeper = alliancemodulekeeper.NewKeeper( + appCodec, + appKeepers.keys[alliancemoduletypes.StoreKey], + appKeepers.GetSubspace(alliancemoduletypes.ModuleName), + appKeepers.AccountKeeper, + appKeepers.BankKeeper, + appKeepers.StakingKeeper, + appKeepers.DistrKeeper, + ) + + appKeepers.BankKeeper.RegisterKeepers(appKeepers.AllianceKeeper, appKeepers.StakingKeeper) + // register the staking hooks + // NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks + appKeepers.StakingKeeper.SetHooks( + stakingtypes.NewMultiStakingHooks(appKeepers.DistrKeeper.Hooks(), appKeepers.SlashingKeeper.Hooks(), appKeepers.AllianceKeeper.StakingHooks()), + ) + + // ... 
other modules keepers + + // Create IBC Keeper + appKeepers.IBCKeeper = ibckeeper.NewKeeper( + appCodec, appKeepers.keys[ibchost.StoreKey], appKeepers.GetSubspace(ibchost.ModuleName), appKeepers.StakingKeeper, appKeepers.UpgradeKeeper, appKeepers.ScopedIBCKeeper, + ) + + appKeepers.Wasm08Keeper = wasm08Keeper.NewKeeper(appCodec, appKeepers.keys[wasmtypes.StoreKey], authorityAddress, homePath) + + // ICA Host keeper + appKeepers.ICAHostKeeper = icahostkeeper.NewKeeper( + appCodec, appKeepers.keys[icahosttypes.StoreKey], appKeepers.GetSubspace(icahosttypes.SubModuleName), + appKeepers.IBCKeeper.ChannelKeeper, + appKeepers.IBCKeeper.ChannelKeeper, &appKeepers.IBCKeeper.PortKeeper, + appKeepers.AccountKeeper, appKeepers.ScopedICAHostKeeper, bApp.MsgServiceRouter(), + ) + + icaHostStack := icahost.NewIBCModule(appKeepers.ICAHostKeeper) + + // Create Transfer Keepers + // * SendPacket. Originates from the transferKeeper and goes up the stack: + // transferKeeper.SendPacket -> transfermiddleware.SendPacket -> ibc_rate_limit.SendPacket -> ibc_hooks.SendPacket -> channel.SendPacket + // * RecvPacket, message that originates from core IBC and goes down to app, the flow is the other way + // channel.RecvPacket -> ibc_hooks.OnRecvPacket -> ibc_rate_limit.OnRecvPacket -> forward.OnRecvPacket -> transfermiddleware_OnRecvPacket -> transfer.OnRecvPacket + // + hooksKeeper := ibchookskeeper.NewKeeper( + appKeepers.keys[ibchookstypes.StoreKey], + ) + appKeepers.IBCHooksKeeper = &hooksKeeper + + centauriPrefix := sdk.GetConfig().GetBech32AccountAddrPrefix() + wasmHooks := ibc_hooks.NewWasmHooks(&hooksKeeper, nil, centauriPrefix) // The contract keeper needs to be set later + appKeepers.Ics20WasmHooks = &wasmHooks + appKeepers.HooksICS4Wrapper = ibc_hooks.NewICS4Middleware( + appKeepers.IBCKeeper.ChannelKeeper, + appKeepers.Ics20WasmHooks, + ) + + appKeepers.TransferMiddlewareKeeper = transfermiddlewarekeeper.NewKeeper( + appKeepers.keys[transfermiddlewaretypes.StoreKey], + appKeepers.GetSubspace(transfermiddlewaretypes.ModuleName), + appCodec, + &appKeepers.RatelimitKeeper, + &appKeepers.TransferKeeper, + appKeepers.BankKeeper, + authorityAddress, + ) + + appKeepers.TransferKeeper = ibctransferkeeper.NewKeeper( + appCodec, appKeepers.keys[ibctransfertypes.StoreKey], + appKeepers.GetSubspace(ibctransfertypes.ModuleName), + &appKeepers.TransferMiddlewareKeeper, // ICS4Wrapper + appKeepers.IBCKeeper.ChannelKeeper, + &appKeepers.IBCKeeper.PortKeeper, + appKeepers.AccountKeeper, + appKeepers.BankKeeper, + appKeepers.ScopedTransferKeeper, + ) + + appKeepers.RouterKeeper = routerkeeper.NewKeeper( + appCodec, + appKeepers.keys[routertypes.StoreKey], + appKeepers.GetSubspace(routertypes.ModuleName), + appKeepers.TransferKeeper, + appKeepers.IBCKeeper.ChannelKeeper, + &appKeepers.DistrKeeper, + appKeepers.BankKeeper, + appKeepers.TransferMiddlewareKeeper, + appKeepers.IBCKeeper.ChannelKeeper, + ) + + appKeepers.RatelimitKeeper = *ratelimitmodulekeeper.NewKeeper( + appCodec, + appKeepers.keys[ratelimitmoduletypes.StoreKey], + appKeepers.GetSubspace(ratelimitmoduletypes.ModuleName), + appKeepers.BankKeeper, + appKeepers.IBCKeeper.ChannelKeeper, + // TODO: Implement ICS4Wrapper in Records and pass records keeper here + &appKeepers.HooksICS4Wrapper, // ICS4Wrapper + appKeepers.TransferMiddlewareKeeper, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + + transferIBCModule := transfer.NewIBCModule(appKeepers.TransferKeeper) + scopedICQKeeper := appKeepers.CapabilityKeeper.ScopeToModule(icqtypes.ModuleName) + 
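The transfer stack assembled just below (ibc_hooks wrapping rate limit, wrapping the packet-forward router, wrapping transfermiddleware, wrapping transfer) realizes the SendPacket/RecvPacket flow described in the comment above. The following toy pass-through middleware is orientation only, not part of this change: it shows why the module wrapped last (ibc_hooks) sees RecvPacket first and SendPacket last. Only the two packet hooks of the interface are shown.

package example // hypothetical package, illustration only

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
	clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types"
	channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types"
	porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types"
	ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported"
)

// passThrough wraps the next IBC module inward and the next ICS4 wrapper outward.
// Every middleware in the stack below has this shape.
type passThrough struct {
	app  porttypes.IBCModule   // next module toward the transfer app (inbound direction)
	ics4 porttypes.ICS4Wrapper // next wrapper toward the channel keeper (outbound direction)
}

// Inbound: core IBC calls the outermost middleware first, which delegates inward,
// so ibc_hooks runs before the rate limiter, the router and transfermiddleware.
func (m passThrough) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet, relayer sdk.AccAddress) ibcexported.Acknowledgement {
	return m.app.OnRecvPacket(ctx, packet, relayer)
}

// Outbound: the transfer keeper calls its ICS4 wrapper, which delegates outward,
// so the same modules run in the opposite order before the channel keeper sends.
func (m passThrough) SendPacket(ctx sdk.Context, chanCap *capabilitytypes.Capability, sourcePort, sourceChannel string, timeoutHeight clienttypes.Height, timeoutTimestamp uint64, data []byte) (uint64, error) {
	return m.ics4.SendPacket(ctx, chanCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data)
}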
+	appKeepers.ICQKeeper = icqkeeper.NewKeeper(
+		appCodec, appKeepers.keys[icqtypes.StoreKey], appKeepers.GetSubspace(icqtypes.ModuleName),
+		appKeepers.IBCKeeper.ChannelKeeper, appKeepers.IBCKeeper.ChannelKeeper, &appKeepers.IBCKeeper.PortKeeper,
+		scopedICQKeeper, bApp,
+	)
+
+	icqIBCModule := icq.NewIBCModule(appKeepers.ICQKeeper)
+	transfermiddlewareStack := transfermiddleware.NewIBCMiddleware(
+		transferIBCModule,
+		appKeepers.TransferMiddlewareKeeper,
+	)
+
+	ibcMiddlewareStack := router.NewIBCMiddleware(
+		transfermiddlewareStack,
+		appKeepers.RouterKeeper,
+		0,
+		routerkeeper.DefaultForwardTransferPacketTimeoutTimestamp,
+		routerkeeper.DefaultRefundTransferPacketTimeoutTimestamp,
+	)
+	ratelimitMiddlewareStack := ratelimitmodule.NewIBCMiddleware(appKeepers.RatelimitKeeper, ibcMiddlewareStack)
+	hooksTransferMiddleware := ibc_hooks.NewIBCMiddleware(ratelimitMiddlewareStack, &appKeepers.HooksICS4Wrapper)
+
+	// Create evidence keeper to register the IBC light client misbehaviour evidence route
+	evidenceKeeper := evidencekeeper.NewKeeper(
+		appCodec, appKeepers.keys[evidencetypes.StoreKey], appKeepers.StakingKeeper, appKeepers.SlashingKeeper,
+	)
+	// If evidence needs to be handled for the app, set routes in router here and seal
+	appKeepers.EvidenceKeeper = *evidenceKeeper
+
+	wasmDir := filepath.Join(homePath, "wasm")
+	wasmConfig, err := wasm.ReadWasmConfig(appOpts)
+	if err != nil {
+		panic(fmt.Sprintf("error while reading wasm config: %s", err))
+	}
+
+	// The last arguments can contain custom message handlers, and custom query handlers,
+	// if we want to allow any custom callbacks
+	availableCapabilities := strings.Join(AllCapabilities(), ",")
+	appKeepers.WasmKeeper = wasm.NewKeeper(
+		appCodec,
+		appKeepers.keys[wasm.StoreKey],
+		appKeepers.AccountKeeper,
+		appKeepers.BankKeeper,
+		appKeepers.StakingKeeper,
+		distrkeeper.NewQuerier(appKeepers.DistrKeeper),
+		appKeepers.IBCKeeper.ChannelKeeper, // ICS4 Wrapper: fee IBC middleware
+		appKeepers.IBCKeeper.ChannelKeeper,
+		&appKeepers.IBCKeeper.PortKeeper,
+		appKeepers.ScopedWasmKeeper,
+		appKeepers.TransferKeeper,
+		bApp.MsgServiceRouter(),
+		bApp.GRPCQueryRouter(),
+		wasmDir,
+		wasmConfig,
+		availableCapabilities,
+		authtypes.NewModuleAddress(govtypes.ModuleName).String(),
+		wasmOpts...,
+	)
+
+	appKeepers.Ics20WasmHooks.ContractKeeper = &appKeepers.WasmKeeper
+
+	// Register Gov (must be registered after stakeibc)
+	govRouter := govtypesv1beta1.NewRouter()
+	govRouter.AddRoute(govtypes.RouterKey, govtypesv1beta1.ProposalHandler).
+		AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(appKeepers.ParamsKeeper)).
+		// AddRoute(distrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(appKeepers.DistrKeeper)).
+		AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(appKeepers.UpgradeKeeper)).
+		AddRoute(ibcclienttypes.RouterKey, ibcclient.NewClientProposalHandler(appKeepers.IBCKeeper.ClientKeeper)).
+ AddRoute(alliancemoduletypes.RouterKey, alliancemodule.NewAllianceProposalHandler(appKeepers.AllianceKeeper)) + + // The gov proposal types can be individually enabled + if len(enabledProposals) != 0 { + govRouter.AddRoute(wasm.RouterKey, wasm.NewWasmProposalHandler(appKeepers.WasmKeeper, enabledProposals)) + } + + govKeeper := *govkeeper.NewKeeper( + appCodec, appKeepers.keys[govtypes.StoreKey], appKeepers.AccountKeeper, appKeepers.BankKeeper, + appKeepers.StakingKeeper, bApp.MsgServiceRouter(), govtypes.DefaultConfig(), authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + + govKeeper.SetLegacyRouter(govRouter) + + appKeepers.GovKeeper = *govKeeper.SetHooks( + govtypes.NewMultiGovHooks( + // register the governance hooks + ), + ) + + ibcRouter := porttypes.NewRouter() + ibcRouter.AddRoute(ibctransfertypes.ModuleName, hooksTransferMiddleware) + ibcRouter.AddRoute(icqtypes.ModuleName, icqIBCModule) + ibcRouter.AddRoute(wasm.ModuleName, wasm.NewIBCHandler(appKeepers.WasmKeeper, appKeepers.IBCKeeper.ChannelKeeper, appKeepers.IBCKeeper.ChannelKeeper)) + ibcRouter.AddRoute(icahosttypes.SubModuleName, icaHostStack) + + // this line is used by starport scaffolding # ibc/app/router + appKeepers.IBCKeeper.SetRouter(ibcRouter) +} + +// InitSpecialKeepers initiates special keepers (upgradekeeper, params keeper) +func (appKeepers *AppKeepers) InitSpecialKeepers( + appCodec codec.Codec, + cdc *codec.LegacyAmino, + bApp *baseapp.BaseApp, + invCheckPeriod uint, + skipUpgradeHeights map[int64]bool, + homePath string, +) { + appKeepers.GenerateKeys() + appKeepers.ParamsKeeper = appKeepers.initParamsKeeper(appCodec, cdc, appKeepers.keys[paramstypes.StoreKey], appKeepers.tkeys[paramstypes.TStoreKey]) + appKeepers.CapabilityKeeper = capabilitykeeper.NewKeeper(appCodec, appKeepers.keys[capabilitytypes.StoreKey], appKeepers.memKeys[capabilitytypes.MemStoreKey]) + + // set the BaseApp's parameter store + appKeepers.ConsensusParamsKeeper = consensusparamkeeper.NewKeeper(appCodec, appKeepers.keys[consensusparamtypes.StoreKey], authtypes.NewModuleAddress(govtypes.ModuleName).String()) + bApp.SetParamStore(&appKeepers.ConsensusParamsKeeper) + + // grant capabilities for the ibc and ibc-transfer modules + appKeepers.ScopedIBCKeeper = appKeepers.CapabilityKeeper.ScopeToModule(ibchost.ModuleName) + appKeepers.ScopedTransferKeeper = appKeepers.CapabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName) + appKeepers.ScopedWasmKeeper = appKeepers.CapabilityKeeper.ScopeToModule(wasm.ModuleName) + appKeepers.ScopedICAHostKeeper = appKeepers.CapabilityKeeper.ScopeToModule(icahosttypes.SubModuleName) + appKeepers.ScopedRateLimitKeeper = appKeepers.CapabilityKeeper.ScopeToModule(ratelimitmoduletypes.ModuleName) + + appKeepers.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, appKeepers.keys[upgradetypes.StoreKey], appCodec, homePath, bApp, authtypes.NewModuleAddress(govtypes.ModuleName).String()) +} + +// initParamsKeeper init params keeper and its subspaces +func (appKeepers *AppKeepers) initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino, key, tkey storetypes.StoreKey) paramskeeper.Keeper { + paramsKeeper := paramskeeper.NewKeeper(appCodec, legacyAmino, key, tkey) + + paramsKeeper.Subspace(authtypes.ModuleName) + paramsKeeper.Subspace(banktypes.ModuleName) + paramsKeeper.Subspace(stakingtypes.ModuleName) + paramsKeeper.Subspace(distrtypes.ModuleName) + paramsKeeper.Subspace(slashingtypes.ModuleName) + 
paramsKeeper.Subspace(routertypes.ModuleName).WithKeyTable(routertypes.ParamKeyTable()) // TODO:
+	paramsKeeper.Subspace(govtypes.ModuleName).WithKeyTable(govtypesv1.ParamKeyTable()) //nolint:staticcheck
+	paramsKeeper.Subspace(minttypes.ModuleName).WithKeyTable(minttypes.ParamKeyTable())
+	paramsKeeper.Subspace(crisistypes.ModuleName)
+	paramsKeeper.Subspace(ibctransfertypes.ModuleName)
+	paramsKeeper.Subspace(ratelimitmoduletypes.ModuleName)
+	paramsKeeper.Subspace(icqtypes.ModuleName)
+	paramsKeeper.Subspace(ibchost.ModuleName)
+	paramsKeeper.Subspace(icahosttypes.SubModuleName)
+	paramsKeeper.Subspace(alliancemoduletypes.ModuleName)
+	paramsKeeper.Subspace(wasm.ModuleName)
+	paramsKeeper.Subspace(transfermiddlewaretypes.ModuleName)
+
+	return paramsKeeper
+}
+
+// BlacklistedModuleAccountAddrs returns all the app's blacklisted module account addresses.
+func (appKeepers *AppKeepers) BlacklistedModuleAccountAddrs(maccPerms map[string][]string) map[string]bool {
+	modAccAddrs := make(map[string]bool)
+	// DO NOT REMOVE: StringMapKeys fixes non-deterministic map iteration
+	for acc := range maccPerms {
+		modAccAddrs[authtypes.NewModuleAddress(acc).String()] = true
+	}
+	return modAccAddrs
+}
diff --git a/app/keepers/keys.go b/app/keepers/keys.go
new file mode 100644
index 000000000..6126eb791
--- /dev/null
+++ b/app/keepers/keys.go
@@ -0,0 +1,103 @@
+package keepers
+
+import (
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
+
+	// bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
+	banktypes "github.com/cosmos/cosmos-sdk/x/bank/types"
+	capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types"
+	crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types"
+	distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types"
+	evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types"
+	govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"
+
+	"github.com/cosmos/cosmos-sdk/x/feegrant"
+	"github.com/cosmos/cosmos-sdk/x/group"
+
+	paramstypes "github.com/cosmos/cosmos-sdk/x/params/types"
+	slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types"
+	stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
+	upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+	icahosttypes "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts/host/types"
+	ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types"
+	ibchost "github.com/cosmos/ibc-go/v7/modules/core/exported"
+	icqtypes "github.com/strangelove-ventures/async-icq/v7/types"
+
+	routertypes "github.com/strangelove-ventures/packet-forward-middleware/v7/router/types"
+	alliancemoduletypes "github.com/terra-money/alliance/x/alliance/types"
+
+	ibchookstypes "github.com/notional-labs/centauri/v4/x/ibc-hooks/types"
+	ratelimitmoduletypes "github.com/notional-labs/centauri/v4/x/ratelimit/types"
+	transfermiddlewaretypes "github.com/notional-labs/centauri/v4/x/transfermiddleware/types"
+
+	consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types"
+
+	storetypes "github.com/cosmos/cosmos-sdk/store/types"
+	minttypes "github.com/notional-labs/centauri/v4/x/mint/types"
+
+	"github.com/CosmWasm/wasmd/x/wasm"
+	wasm08types "github.com/cosmos/ibc-go/v7/modules/light-clients/08-wasm/types"
+)
+
+// GenerateKeys generates new keys (KV Store, Transient store, and memory store).
+func (appKeepers *AppKeepers) GenerateKeys() {
+	// Define what keys will be used in the cosmos-sdk key/value store.
+ // Cosmos-SDK modules each have a "key" that allows the application to reference what they've stored on the chain. + appKeepers.keys = sdk.NewKVStoreKeys( + authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, + govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey, + evidencetypes.StoreKey, ibctransfertypes.StoreKey, icqtypes.StoreKey, capabilitytypes.StoreKey, consensusparamtypes.StoreKey, wasm08types.StoreKey, + crisistypes.StoreKey, routertypes.StoreKey, transfermiddlewaretypes.StoreKey, group.StoreKey, minttypes.StoreKey, alliancemoduletypes.StoreKey, wasm.StoreKey, ibchookstypes.StoreKey, icahosttypes.StoreKey, ratelimitmoduletypes.StoreKey, + ) + + // Define transient store keys + appKeepers.tkeys = sdk.NewTransientStoreKeys(paramstypes.TStoreKey) + + // MemKeys are for information that is stored only in RAM. + appKeepers.memKeys = sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey) +} + +// GetKVStoreKey gets KV Store keys. +func (appKeepers *AppKeepers) GetKVStoreKey() map[string]*storetypes.KVStoreKey { + return appKeepers.keys +} + +// GetTransientStoreKey gets Transient Store keys. +func (appKeepers *AppKeepers) GetTransientStoreKey() map[string]*storetypes.TransientStoreKey { + return appKeepers.tkeys +} + +// GetMemoryStoreKey get memory Store keys. +func (appKeepers *AppKeepers) GetMemoryStoreKey() map[string]*storetypes.MemoryStoreKey { + return appKeepers.memKeys +} + +// GetKey returns the KVStoreKey for the provided store key. +// +// NOTE: This is solely to be used for testing purposes. +func (appKeepers *AppKeepers) GetKey(storeKey string) *storetypes.KVStoreKey { + return appKeepers.keys[storeKey] +} + +// GetTKey returns the TransientStoreKey for the provided store key. +// +// NOTE: This is solely to be used for testing purposes. +func (appKeepers *AppKeepers) GetTKey(storeKey string) *storetypes.TransientStoreKey { + return appKeepers.tkeys[storeKey] +} + +// GetMemKey returns the MemStoreKey for the provided mem key. +// +// NOTE: This is solely used for testing purposes. +func (appKeepers *AppKeepers) GetMemKey(storeKey string) *storetypes.MemoryStoreKey { + return appKeepers.memKeys[storeKey] +} + +// GetSubspace returns a param subspace for a given module name. +// +// NOTE: This is solely to be used for testing purposes. 
+func (appKeepers *AppKeepers) GetSubspace(moduleName string) paramstypes.Subspace { + subspace, _ := appKeepers.ParamsKeeper.GetSubspace(moduleName) + return subspace +} diff --git a/app/wasm.go b/app/keepers/wasm.go similarity index 96% rename from app/wasm.go rename to app/keepers/wasm.go index a01074f9e..50bc15654 100644 --- a/app/wasm.go +++ b/app/keepers/wasm.go @@ -1,4 +1,4 @@ -package app +package keepers // AllCapabilities returns all capabilities available with the current wasmvm // See https://github.com/CosmWasm/cosmwasm/blob/main/docs/CAPABILITIES-BUILT-IN.md diff --git a/app/test_access.go b/app/test_access.go index 6e32fcbf0..c1ca828d4 100644 --- a/app/test_access.go +++ b/app/test_access.go @@ -3,17 +3,20 @@ package app import ( "testing" + "github.com/CosmWasm/wasmd/x/wasm" "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/client" - "github.com/cosmos/cosmos-sdk/codec" + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" ibctransferkeeper "github.com/cosmos/ibc-go/v7/modules/apps/transfer/keeper" ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper" wasm08 "github.com/cosmos/ibc-go/v7/modules/light-clients/08-wasm/keeper" - routerKeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" + ratelimitkeeper "github.com/notional-labs/centauri/v4/x/ratelimit/keeper" + tfmdKeeper "github.com/notional-labs/centauri/v4/x/transfermiddleware/keeper" ) type TestSupport struct { @@ -46,10 +49,18 @@ func (s TestSupport) StakingKeeper() *stakingkeeper.Keeper { return s.app.StakingKeeper } +func (s TestSupport) AccountKeeper() authkeeper.AccountKeeper { + return s.app.AccountKeeper +} + func (s TestSupport) BankKeeper() bankkeeper.Keeper { return s.app.BankKeeper } +func (s TestSupport) GovKeeper() govkeeper.Keeper { + return s.app.GovKeeper +} + func (s TestSupport) TransferKeeper() ibctransferkeeper.Keeper { return s.app.TransferKeeper } @@ -58,6 +69,10 @@ func (s TestSupport) Wasm08Keeper() wasm08.Keeper { return s.app.Wasm08Keeper } +func (s TestSupport) WasmdKeeper() wasm.Keeper { + return s.app.WasmKeeper +} + func (s TestSupport) GetBaseApp() *baseapp.BaseApp { return s.app.BaseApp } @@ -66,6 +81,10 @@ func (s TestSupport) GetTxConfig() client.TxConfig { return s.app.GetTxConfig() } -func (s TestSupport) TransferMiddleware() routerKeeper.Keeper { +func (s TestSupport) TransferMiddleware() tfmdKeeper.Keeper { return s.app.TransferMiddlewareKeeper } + +func (s TestSupport) RateLimit() ratelimitkeeper.Keeper { + return s.app.RatelimitKeeper +} diff --git a/app/test_helpers.go b/app/test_helpers.go index f9a593a1d..0f0cb67a8 100644 --- a/app/test_helpers.go +++ b/app/test_helpers.go @@ -35,7 +35,7 @@ import ( bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - minttypes "github.com/notional-labs/centauri/v3/x/mint/types" + minttypes "github.com/notional-labs/centauri/v4/x/mint/types" "github.com/CosmWasm/wasmd/x/wasm" wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" @@ -96,6 +96,7 @@ func setup(tb testing.TB, withGenesis bool, invCheckPeriod uint) (*CentauriApp, // account. A Nop logger is set in FeeAbs. 
 func SetupWithGenesisValSet(
 	t *testing.T,
+	ctxTime time.Time,
 	valSet *tmtypes.ValidatorSet,
 	genAccs []authtypes.GenesisAccount,
 	balances ...banktypes.Balance,
@@ -159,6 +160,7 @@ func SetupWithGenesisValSet(
 	// init chain will set the validator set and initialize the genesis accounts
 	app.InitChain(
 		abci.RequestInitChain{
+			Time:            ctxTime,
 			Validators:      []abci.ValidatorUpdate{},
 			ConsensusParams: DefaultConsensusParams,
 			AppStateBytes:   stateBytes,
diff --git a/app/upgrade/centauri/constants.go b/app/upgrades/centauri/constants.go
similarity index 100%
rename from app/upgrade/centauri/constants.go
rename to app/upgrades/centauri/constants.go
diff --git a/app/upgrade/centauri/readme.md b/app/upgrades/centauri/readme.md
similarity index 100%
rename from app/upgrade/centauri/readme.md
rename to app/upgrades/centauri/readme.md
diff --git a/app/upgrade/centauri/upgrade.go b/app/upgrades/centauri/upgrade.go
similarity index 88%
rename from app/upgrade/centauri/upgrade.go
rename to app/upgrades/centauri/upgrade.go
index f7b9037e1..ce6260b21 100644
--- a/app/upgrade/centauri/upgrade.go
+++ b/app/upgrades/centauri/upgrade.go
@@ -16,10 +16,10 @@ import (
 	govkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper"
-	bech32authmigration "github.com/notional-labs/centauri/v3/bech32-migration/auth"
-	bech32govmigration "github.com/notional-labs/centauri/v3/bech32-migration/gov"
-	bech32slashingmigration "github.com/notional-labs/centauri/v3/bech32-migration/slashing"
-	bech32stakingmigration "github.com/notional-labs/centauri/v3/bech32-migration/staking"
+	bech32authmigration "github.com/notional-labs/centauri/v4/bech32-migration/auth"
+	bech32govmigration "github.com/notional-labs/centauri/v4/bech32-migration/gov"
+	bech32slashingmigration "github.com/notional-labs/centauri/v4/bech32-migration/slashing"
+	bech32stakingmigration "github.com/notional-labs/centauri/v4/bech32-migration/staking"
 )
 func CreateUpgradeHandler(
diff --git a/app/upgrade/reward/constants.go b/app/upgrades/reward/constants.go
similarity index 100%
rename from app/upgrade/reward/constants.go
rename to app/upgrades/reward/constants.go
diff --git a/app/upgrade/reward/upgrade.go b/app/upgrades/reward/upgrade.go
similarity index 91%
rename from app/upgrade/reward/upgrade.go
rename to app/upgrades/reward/upgrade.go
index cde43ed3e..461a99a9b 100644
--- a/app/upgrade/reward/upgrade.go
+++ b/app/upgrades/reward/upgrade.go
@@ -4,8 +4,8 @@ import (
 	sdk "github.com/cosmos/cosmos-sdk/types"
 	"github.com/cosmos/cosmos-sdk/types/module"
 	upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
-	mintkeeper "github.com/notional-labs/centauri/v3/x/mint/keeper"
-	tfmwkeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper"
+	mintkeeper "github.com/notional-labs/centauri/v4/x/mint/keeper"
+	tfmwkeeper "github.com/notional-labs/centauri/v4/x/transfermiddleware/keeper"
 )
 var listAllowedRelayAddress = []string{
diff --git a/app/upgrades/types.go b/app/upgrades/types.go
new file mode 100644
index 000000000..17780db22
--- /dev/null
+++ b/app/upgrades/types.go
@@ -0,0 +1,48 @@
+package upgrades
+
+import (
+	types "github.com/cometbft/cometbft/proto/tendermint/types"
+	store "github.com/cosmos/cosmos-sdk/store/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/cosmos/cosmos-sdk/types/module"
+	upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types"
+
+	"github.com/notional-labs/centauri/v4/app/keepers"
+)
+
+// BaseAppParamManager defines an interface that BaseApp is expected to fulfill
+// that allows upgrade handlers to modify BaseApp parameters.
+type BaseAppParamManager interface {
+	GetConsensusParams(ctx sdk.Context) *types.ConsensusParams
+	StoreConsensusParams(ctx sdk.Context, cp *types.ConsensusParams)
+}
+
+// Upgrade defines a struct containing the fields that a SoftwareUpgradeProposal
+// needs in order for the state migration to go smoothly.
+// Each upgrade must populate this struct and set it in app.go.
+// app.go will then define the handler.
+type Upgrade struct {
+	// Upgrade version name, for the upgrade handler, e.g. `v7`
+	UpgradeName string
+
+	// CreateUpgradeHandler defines the function that creates an upgrade handler
+	CreateUpgradeHandler func(*module.Manager, module.Configurator, BaseAppParamManager, *keepers.AppKeepers) upgradetypes.UpgradeHandler
+
+	// Store upgrades; should be used for any new modules introduced, modules deleted, or store names renamed.
+	StoreUpgrades store.StoreUpgrades
+}
+
+// Fork defines a struct containing the requisite fields for a hard fork at a given
+// height, applied without a software upgrade proposal.
+// One-time code to run at the start of the fork can be added in `BeginForkLogic`.
+// Any other change in the code should be height-gated if the goal is for old and new binaries
+// to be compatible prior to the upgrade height.
+type Fork struct {
+	// Upgrade version name, for the upgrade handler, e.g. `v7`
+	UpgradeName string
+	// height the upgrade occurs at
+	UpgradeHeight int64
+
+	// Function that runs some custom state transition code at the beginning of a fork.
+	BeginForkLogic func(ctx sdk.Context, keepers *keepers.AppKeepers)
+}
diff --git a/app/upgrades/v4/constants.go b/app/upgrades/v4/constants.go
new file mode 100644
index 000000000..488219e92
--- /dev/null
+++ b/app/upgrades/v4/constants.go
@@ -0,0 +1,24 @@
+package v4
+
+import (
+	wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types"
+	store "github.com/cosmos/cosmos-sdk/store/types"
+	icahosttypes "github.com/cosmos/ibc-go/v7/modules/apps/27-interchain-accounts/host/types"
+	"github.com/notional-labs/centauri/v4/app/upgrades"
+	ibchookstypes "github.com/notional-labs/centauri/v4/x/ibc-hooks/types"
+	ratelimitmoduletypes "github.com/notional-labs/centauri/v4/x/ratelimit/types"
+)
+
+const (
+	// UpgradeName defines the on-chain upgrade name for the Centauri upgrade.
+ UpgradeName = "v4" +) + +var Upgrade = upgrades.Upgrade{ + UpgradeName: UpgradeName, + CreateUpgradeHandler: CreateUpgradeHandler, + StoreUpgrades: store.StoreUpgrades{ + Added: []string{wasmtypes.StoreKey, ibchookstypes.StoreKey, ratelimitmoduletypes.StoreKey, icahosttypes.StoreKey}, + Deleted: []string{}, + }, +} diff --git a/app/upgrades/v4/upgrade.go b/app/upgrades/v4/upgrade.go new file mode 100644 index 000000000..29a8e6217 --- /dev/null +++ b/app/upgrades/v4/upgrade.go @@ -0,0 +1,25 @@ +package v4 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + "github.com/notional-labs/centauri/v4/app/keepers" + "github.com/notional-labs/centauri/v4/app/upgrades" + tfmdtypes "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" +) + +func CreateUpgradeHandler( + mm *module.Manager, + configurator module.Configurator, + bpm upgrades.BaseAppParamManager, + keepers *keepers.AppKeepers, +) upgradetypes.UpgradeHandler { + return func(ctx sdk.Context, plan upgradetypes.Plan, vm module.VersionMap) (module.VersionMap, error) { + // Add params for transfer middleware + transmiddlewareParams := tfmdtypes.DefaultParams() + keepers.TransferMiddlewareKeeper.SetParams(ctx, transmiddlewareParams) + + return mm.RunMigrations(ctx, configurator, vm) + } +} diff --git a/bech32-migration/auth/auth.go b/bech32-migration/auth/auth.go index 4270b581c..89fabf2b8 100644 --- a/bech32-migration/auth/auth.go +++ b/bech32-migration/auth/auth.go @@ -8,7 +8,7 @@ import ( "github.com/cosmos/cosmos-sdk/x/auth/types" vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" - "github.com/notional-labs/centauri/v3/bech32-migration/utils" + "github.com/notional-labs/centauri/v4/bech32-migration/utils" ) func MigrateAddressBech32(ctx sdk.Context, storeKey storetypes.StoreKey, cdc codec.BinaryCodec) { diff --git a/bech32-migration/gov/gov.go b/bech32-migration/gov/gov.go index 41df3db58..be4006e3e 100644 --- a/bech32-migration/gov/gov.go +++ b/bech32-migration/gov/gov.go @@ -9,7 +9,7 @@ import ( v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1" - "github.com/notional-labs/centauri/v3/bech32-migration/utils" + "github.com/notional-labs/centauri/v4/bech32-migration/utils" ) func MigrateAddressBech32(ctx sdk.Context, storeKey storetypes.StoreKey, cdc codec.BinaryCodec) { diff --git a/bech32-migration/slashing/slashing.go b/bech32-migration/slashing/slashing.go index f3ea5ea60..16473eeea 100644 --- a/bech32-migration/slashing/slashing.go +++ b/bech32-migration/slashing/slashing.go @@ -5,7 +5,7 @@ import ( storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/slashing/types" - "github.com/notional-labs/centauri/v3/bech32-migration/utils" + "github.com/notional-labs/centauri/v4/bech32-migration/utils" ) func MigrateAddressBech32(ctx sdk.Context, storeKey storetypes.StoreKey, cdc codec.BinaryCodec) { diff --git a/bech32-migration/staking/staking.go b/bech32-migration/staking/staking.go index 1e06fe65d..88742fe6d 100644 --- a/bech32-migration/staking/staking.go +++ b/bech32-migration/staking/staking.go @@ -5,7 +5,7 @@ import ( storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/staking/types" - "github.com/notional-labs/centauri/v3/bech32-migration/utils" + 
"github.com/notional-labs/centauri/v4/bech32-migration/utils" ) func MigrateAddressBech32(ctx sdk.Context, storeKey storetypes.StoreKey, cdc codec.BinaryCodec) { diff --git a/cmd/centaurid/cmd/genaccounts.go b/cmd/centaurid/cmd/genaccounts.go index b3b727856..3121f98af 100644 --- a/cmd/centaurid/cmd/genaccounts.go +++ b/cmd/centaurid/cmd/genaccounts.go @@ -6,7 +6,7 @@ import ( "errors" "fmt" - "github.com/notional-labs/centauri/v3/bech32-migration/utils" + "github.com/notional-labs/centauri/v4/bech32-migration/utils" "github.com/spf13/cobra" "github.com/cosmos/cosmos-sdk/client" diff --git a/cmd/centaurid/cmd/root.go b/cmd/centaurid/cmd/root.go index 50f53fb2c..a4d0bd3e9 100644 --- a/cmd/centaurid/cmd/root.go +++ b/cmd/centaurid/cmd/root.go @@ -33,8 +33,8 @@ import ( genutilcli "github.com/cosmos/cosmos-sdk/x/genutil/client/cli" genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" - "github.com/notional-labs/centauri/v3/app" - // "github.com/notional-labs/centauri/v3/app/params" + "github.com/notional-labs/centauri/v4/app" + // "github.com/notional-labs/centauri/v4/app/params" // this line is used by starport scaffolding # stargate/root/import ) diff --git a/cmd/centaurid/main.go b/cmd/centaurid/main.go index dc20261e7..58b8996e5 100644 --- a/cmd/centaurid/main.go +++ b/cmd/centaurid/main.go @@ -5,9 +5,9 @@ import ( svrcmd "github.com/cosmos/cosmos-sdk/server/cmd" - "github.com/notional-labs/centauri/v3/app" - cmd "github.com/notional-labs/centauri/v3/cmd/centaurid/cmd" - cmdcfg "github.com/notional-labs/centauri/v3/cmd/centaurid/config" + "github.com/notional-labs/centauri/v4/app" + cmd "github.com/notional-labs/centauri/v4/cmd/centaurid/cmd" + cmdcfg "github.com/notional-labs/centauri/v4/cmd/centaurid/config" ) func main() { diff --git a/custom/bank/bank_test.go b/custom/bank/bank_test.go index 84333f210..252b03ce5 100644 --- a/custom/bank/bank_test.go +++ b/custom/bank/bank_test.go @@ -7,7 +7,7 @@ import ( banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" - customibctesting "github.com/notional-labs/centauri/v3/app/ibctesting" + customibctesting "github.com/notional-labs/centauri/v4/app/ibctesting" "github.com/stretchr/testify/suite" ) @@ -131,3 +131,128 @@ func (suite *CustomBankTestSuite) TestTotalSupply() { }) } } + +func (suite *CustomBankTestSuite) TestTotalSupply2() { + var ( + transferAmount = sdk.NewInt(1000000000) + transferAmount2 = sdk.NewInt(3500000000) + // when transfer via sdk transfer from A (module) -> B (contract) + coinChainASendToB = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + coinChainCSentToB = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount2) + timeoutHeight = clienttypes.NewHeight(1, 110) + ) + var ( + expChainBBalanceDiff sdk.Coins + pathAB = NewTransferPath(suite.chainA, suite.chainB) + pathCB = NewTransferPath(suite.chainC, suite.chainB) + ) + + testCases := []struct { + name string + expChainABalanceDiff sdk.Coin + expChainCBalanceDiff sdk.Coin + expTotalSupplyDiff sdk.Coins + malleate func() + }{ + { + "Total supply with no transfermiddleware setup", + sdk.NewCoin(sdk.DefaultBondDenom, transferAmount), + sdk.NewCoin(sdk.DefaultBondDenom, transferAmount2), + sdk.Coins{sdk.NewCoin("ibc/3C3D7B3BE4ECC85A0E5B52A3AEC3B7DFC2AA9CA47C37821E57020D6807043BE9", transferAmount2), sdk.NewCoin("ibc/C053D637CCA2A2BA030E2C5EE1B28A16F71CCB0E45E8BE52766DC1B241B77878", transferAmount)}, + func() { + 
transferCoinFromA := ibctransfertypes.GetTransferCoin(pathAB.EndpointB.ChannelConfig.PortID, pathAB.EndpointB.ChannelID, coinChainASendToB.Denom, transferAmount) + transferCoinFromC := ibctransfertypes.GetTransferCoin(pathCB.EndpointB.ChannelConfig.PortID, pathCB.EndpointB.ChannelID, coinChainCSentToB.Denom, transferAmount2) + expChainBBalanceDiff = sdk.NewCoins(transferCoinFromA, transferCoinFromC) + }, + }, + { + "Total supply with transfermiddleware setup", + sdk.NewCoin(sdk.DefaultBondDenom, transferAmount), + sdk.NewCoin(sdk.DefaultBondDenom, transferAmount2), + sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, transferAmount), sdk.NewCoin("ibc/3C3D7B3BE4ECC85A0E5B52A3AEC3B7DFC2AA9CA47C37821E57020D6807043BE9", transferAmount2)), + func() { + // Add parachain token info + chainBtransMiddleware := suite.chainB.TransferMiddleware() + + transferCoinFromA := sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + transferCoinFromC := ibctransfertypes.GetTransferCoin(pathCB.EndpointB.ChannelConfig.PortID, pathCB.EndpointB.ChannelID, coinChainASendToB.Denom, transferAmount2) + expChainBBalanceDiff = sdk.NewCoins(transferCoinFromA, transferCoinFromC) + + err := chainBtransMiddleware.AddParachainIBCInfo(suite.chainB.GetContext(), "ibc/C053D637CCA2A2BA030E2C5EE1B28A16F71CCB0E45E8BE52766DC1B241B77878", "channel-0", sdk.DefaultBondDenom, sdk.DefaultBondDenom) + suite.Require().NoError(err) + }, + }, + } + for _, tc := range testCases { + suite.Run(tc.name, func() { + suite.SetupTest() // reset + + pathAB = NewTransferPath(suite.chainA, suite.chainB) + pathCB = NewTransferPath(suite.chainC, suite.chainB) + suite.coordinator.Setup(pathAB) + suite.coordinator.Setup(pathCB) + + tc.malleate() + + originalChainABalance := suite.chainA.AllBalances(suite.chainA.SenderAccount.GetAddress()) + // chainB.SenderAccount: 10000000000000000000stake + originalChainBBalance := suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + originalChainCBalance := suite.chainC.AllBalances(suite.chainC.SenderAccount.GetAddress()) + originalChainBTotalSupply, err := suite.chainB.GetBankKeeper().TotalSupply(suite.chainB.GetContext(), &banktypes.QueryTotalSupplyRequest{}) + suite.Require().NoError(err) + + // Send from A to B + msg := ibctransfertypes.NewMsgTransfer(pathAB.EndpointA.ChannelConfig.PortID, pathAB.EndpointA.ChannelID, coinChainASendToB, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + _, err = suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, pathAB.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPackets(pathAB) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // Send from C to B + msg = ibctransfertypes.NewMsgTransfer(pathCB.EndpointA.ChannelConfig.PortID, pathCB.EndpointA.ChannelID, coinChainCSentToB, suite.chainC.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + _, err = suite.chainC.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, pathCB.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainC.PendingSendPackets)) + 
suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain C + err = suite.coordinator.RelayAndAckPendingPackets(pathCB) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainC.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and source chain balance was decreased + newChainABalance := suite.chainA.AllBalances(suite.chainA.SenderAccount.GetAddress()) + suite.Require().Equal(originalChainABalance.Sub(tc.expChainABalanceDiff), newChainABalance) + + newChainCBalance := suite.chainC.AllBalances(suite.chainC.SenderAccount.GetAddress()) + suite.Require().Equal(originalChainCBalance.Sub(tc.expChainCBalanceDiff), newChainCBalance) + + // and dest chain balance contains voucher + expBalance := originalChainBBalance.Add(expChainBBalanceDiff...) + gotBalance := suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + suite.Require().Equal(expBalance, gotBalance) + suite.Require().NoError(err) + + totalSupply, err := suite.chainB.GetBankKeeper().TotalSupply(suite.chainB.GetContext(), &banktypes.QueryTotalSupplyRequest{}) + suite.Require().NoError(err) + suite.Require().Equal(totalSupply.Supply, originalChainBTotalSupply.Supply.Add(tc.expTotalSupplyDiff...)) + }) + } +} diff --git a/custom/bank/keeper/keeper.go b/custom/bank/keeper/keeper.go index f5a6c8392..2e5b54536 100644 --- a/custom/bank/keeper/keeper.go +++ b/custom/bank/keeper/keeper.go @@ -10,11 +10,11 @@ import ( bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" "github.com/cosmos/cosmos-sdk/x/bank/types" stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" - banktypes "github.com/notional-labs/centauri/v3/custom/bank/types" + banktypes "github.com/notional-labs/centauri/v4/custom/bank/types" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - transfermiddlewarekeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" + transfermiddlewarekeeper "github.com/notional-labs/centauri/v4/x/transfermiddleware/keeper" alliancekeeper "github.com/terra-money/alliance/x/alliance/keeper" alliancetypes "github.com/terra-money/alliance/x/alliance/types" diff --git a/custom/bank/module.go b/custom/bank/module.go index 848f0a2ee..431fffbb9 100644 --- a/custom/bank/module.go +++ b/custom/bank/module.go @@ -10,7 +10,7 @@ import ( bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" "github.com/cosmos/cosmos-sdk/x/bank/types" - custombankkeeper "github.com/notional-labs/centauri/v3/custom/bank/keeper" + custombankkeeper "github.com/notional-labs/centauri/v4/custom/bank/keeper" ) // AppModule wraps around the bank module and the bank keeper to return the right total supply ignoring bonded tokens diff --git a/go.mod b/go.mod index 64bf68d03..1867690b4 100644 --- a/go.mod +++ b/go.mod @@ -1,4 +1,4 @@ -module github.com/notional-labs/centauri/v3 +module github.com/notional-labs/centauri/v4 go 1.19 @@ -12,6 +12,7 @@ require ( github.com/cosmos/cosmos-sdk v0.47.3 github.com/cosmos/gogoproto v1.4.10 github.com/cosmos/ibc-go/v7 v7.0.1 + github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.3 github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 @@ -44,7 +45,6 @@ require ( github.com/cosmos/ics23/go v0.9.1-0.20221207100636-b1abd8678aab // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/go-playground/locales v0.14.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache 
v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.3.0 // indirect @@ -172,7 +172,7 @@ require ( golang.org/x/sys v0.7.0 // indirect golang.org/x/term v0.7.0 // indirect golang.org/x/text v0.9.0 // indirect - google.golang.org/protobuf v1.30.0 // indirect + google.golang.org/protobuf v1.30.0 gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect nhooyr.io/websocket v1.8.6 // indirect @@ -184,11 +184,11 @@ replace ( // Use the keyring specified by the cosmos-sdk github.com/99designs/keyring => github.com/cosmos/keyring v1.1.7-0.20210622111912-ef00f8ac3d76 // lock wasmvm so we do not break the grandpa contract - github.com/CosmWasm/wasmvm => github.com/CosmWasm/wasmvm v1.2.2 + github.com/CosmWasm/wasmvm => github.com/CosmWasm/wasmvm v1.2.1 // ibc-go with wasm client - github.com/cosmos/ibc-go/v7 => github.com/notional-labs/ibc-go/v7 v7.0.0-wasm-client + github.com/cosmos/ibc-go/v7 => github.com/notional-labs/ibc-go/v7 v7.0.1-wasm-client - github.com/strangelove-ventures/packet-forward-middleware/v7 => github.com/notional-labs/packet-forward-middleware/v7 v7.0.0-20230615054050-5523db83e69a + github.com/strangelove-ventures/packet-forward-middleware/v7 => github.com/notional-labs/packet-forward-middleware/v7 v7.0.0-20230719072346-7acf9377aac3 github.com/syndtr/goleveldb => github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 github.com/terra-money/alliance => github.com/notional-labs/alliance v1.0.1-0.20230523105704-66dba9499c01 diff --git a/go.sum b/go.sum index 2c21732b5..7d76def83 100644 --- a/go.sum +++ b/go.sum @@ -214,8 +214,8 @@ github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQ github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= github.com/CosmWasm/wasmd v0.40.1 h1:LxbO78t/6S8TkeQlUrJ0m5O87HtAwLx4RGHq3rdrOEU= github.com/CosmWasm/wasmd v0.40.1/go.mod h1:6EOwnv7MpuFaEqxcUOdFV9i4yvrdOciaY6VQ1o7A3yg= -github.com/CosmWasm/wasmvm v1.2.2 h1:F1lTaMRYxk91V9ufV8gYhzLea7sd6AG/GvY1h2JtZsY= -github.com/CosmWasm/wasmvm v1.2.2/go.mod h1:vW/E3h8j9xBQs9bCoijDuawKo9kCtxOaS8N8J7KFtkc= +github.com/CosmWasm/wasmvm v1.2.1 h1:si0tRsRDdUShV0k51Wn6zRKlmj3/WWP9Yr4cLmDTf+8= +github.com/CosmWasm/wasmvm v1.2.1/go.mod h1:vW/E3h8j9xBQs9bCoijDuawKo9kCtxOaS8N8J7KFtkc= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= @@ -770,10 +770,10 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/notional-labs/alliance v1.0.1-0.20230523105704-66dba9499c01 h1:koCsoc1NGpU8TntXwM/s7Z/whsKNDVPiLim35HLBAzA= github.com/notional-labs/alliance v1.0.1-0.20230523105704-66dba9499c01/go.mod h1:GFQ8TsXDMTpu7kif0Dwddz6rxazy0ZJQHfN38ZmAodI= -github.com/notional-labs/ibc-go/v7 v7.0.0-wasm-client h1:oQxReLXp48oCIW1zYL+YYg7Nr3L5ginxzJjWXSk1zHM= -github.com/notional-labs/ibc-go/v7 v7.0.0-wasm-client/go.mod h1:ISHo/Qitjtvj2svGmttaZv03zVXmS+uqvUyF9kFqlI0= -github.com/notional-labs/packet-forward-middleware/v7 v7.0.0-20230615054050-5523db83e69a h1:MMb5U1UydbPHdQGmvLBy5/KgX9NUb8tY6H5aJvXXKjI= 
-github.com/notional-labs/packet-forward-middleware/v7 v7.0.0-20230615054050-5523db83e69a/go.mod h1:9C/cvYi6C0YqCc7sh0Eyv+pRYCXnt1DkJ1AN9q555Hw= +github.com/notional-labs/ibc-go/v7 v7.0.1-wasm-client h1:5EF/ea5DLD0QTiNCOysxe4+dQ5qCqon4+frCm+2JCYI= +github.com/notional-labs/ibc-go/v7 v7.0.1-wasm-client/go.mod h1:ISHo/Qitjtvj2svGmttaZv03zVXmS+uqvUyF9kFqlI0= +github.com/notional-labs/packet-forward-middleware/v7 v7.0.0-20230719072346-7acf9377aac3 h1:LBK82QivQaiycoKZPGDV3fJL1KTWy8AAoerEGUno/60= +github.com/notional-labs/packet-forward-middleware/v7 v7.0.0-20230719072346-7acf9377aac3/go.mod h1:L7nnQ9rEoYLyulE9M/zhcBcHDWqMMSQn8cf4yY5A+T8= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= diff --git a/proto/centauri/mint/v1beta1/genesis.proto b/proto/centauri/mint/v1beta1/genesis.proto index 40d80aff0..a30271a06 100644 --- a/proto/centauri/mint/v1beta1/genesis.proto +++ b/proto/centauri/mint/v1beta1/genesis.proto @@ -10,13 +10,13 @@ option go_package = "x/mint/types"; // GenesisState defines the mint module's genesis state. message GenesisState { // minter is a space for holding current inflation information. - Minter minter = 1 [(gogoproto.nullable) = false]; + Minter minter = 1 [ (gogoproto.nullable) = false ]; // params defines all the paramaters of the module. - Params params = 2 [(gogoproto.nullable) = false]; + Params params = 2 [ (gogoproto.nullable) = false ]; cosmos.base.v1beta1.Coin incentives_supply = 3 [ (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.Coin", - (gogoproto.nullable) = false + (gogoproto.nullable) = false ]; } diff --git a/proto/centauri/mint/v1beta1/mint.proto b/proto/centauri/mint/v1beta1/mint.proto index 1c2b14033..fb898d8c3 100644 --- a/proto/centauri/mint/v1beta1/mint.proto +++ b/proto/centauri/mint/v1beta1/mint.proto @@ -10,15 +10,15 @@ import "cosmos_proto/cosmos.proto"; message Minter { // current annual inflation rate string inflation = 1 [ - (cosmos_proto.scalar) = "cosmos.Dec", + (cosmos_proto.scalar) = "cosmos.Dec", (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", - (gogoproto.nullable) = false + (gogoproto.nullable) = false ]; // current annual expected provisions string annual_provisions = 2 [ - (cosmos_proto.scalar) = "cosmos.Dec", + (cosmos_proto.scalar) = "cosmos.Dec", (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", - (gogoproto.nullable) = false + (gogoproto.nullable) = false ]; } @@ -30,28 +30,28 @@ message Params { string mint_denom = 1; // maximum annual change in inflation rate string inflation_rate_change = 2 [ - (cosmos_proto.scalar) = "cosmos.Dec", + (cosmos_proto.scalar) = "cosmos.Dec", (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", - (gogoproto.nullable) = false + (gogoproto.nullable) = false ]; // goal of percent bonded atoms string goal_bonded = 3 [ - (cosmos_proto.scalar) = "cosmos.Dec", + (cosmos_proto.scalar) = "cosmos.Dec", (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", - (gogoproto.nullable) = false + (gogoproto.nullable) = false ]; // expected blocks per year uint64 blocks_per_year = 4; // expected max amount of token to be distribute per year - string max_token_per_year = 6[ - (cosmos_proto.scalar) = "cosmos.Int", + string max_token_per_year = 6 [ + (cosmos_proto.scalar) = "cosmos.Int", (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", 
- (gogoproto.nullable) = false -]; + (gogoproto.nullable) = false + ]; // expected mint amount of token to be distribute per year string min_token_per_year = 7 [ - (cosmos_proto.scalar) = "cosmos.Dec", + (cosmos_proto.scalar) = "cosmos.Dec", (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", - (gogoproto.nullable) = false + (gogoproto.nullable) = false ]; } diff --git a/proto/centauri/mint/v1beta1/query.proto b/proto/centauri/mint/v1beta1/query.proto index 924e37143..f07d158d1 100644 --- a/proto/centauri/mint/v1beta1/query.proto +++ b/proto/centauri/mint/v1beta1/query.proto @@ -20,7 +20,8 @@ service Query { } // AnnualProvisions current minting annual provisions value. - rpc AnnualProvisions(QueryAnnualProvisionsRequest) returns (QueryAnnualProvisionsResponse) { + rpc AnnualProvisions(QueryAnnualProvisionsRequest) + returns (QueryAnnualProvisionsResponse) { option (google.api.http).get = "/cosmos/mint/v1beta1/annual_provisions"; } } @@ -31,7 +32,7 @@ message QueryParamsRequest {} // QueryParamsResponse is the response type for the Query/Params RPC method. message QueryParamsResponse { // params defines the parameters of the module. - Params params = 1 [(gogoproto.nullable) = false]; + Params params = 1 [ (gogoproto.nullable) = false ]; } // QueryInflationRequest is the request type for the Query/Inflation RPC method. @@ -41,7 +42,10 @@ message QueryInflationRequest {} // method. message QueryInflationResponse { // inflation is the current minting inflation value. - bytes inflation = 1 [(gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", (gogoproto.nullable) = false]; + bytes inflation = 1 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", + (gogoproto.nullable) = false + ]; } // QueryAnnualProvisionsRequest is the request type for the @@ -52,6 +56,8 @@ message QueryAnnualProvisionsRequest {} // Query/AnnualProvisions RPC method. message QueryAnnualProvisionsResponse { // annual_provisions is the current minting annual provisions value. - bytes annual_provisions = 1 - [(gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", (gogoproto.nullable) = false]; + bytes annual_provisions = 1 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Dec", + (gogoproto.nullable) = false + ]; } diff --git a/proto/centauri/mint/v1beta1/tx.proto b/proto/centauri/mint/v1beta1/tx.proto index 96638cd91..429a06d73 100644 --- a/proto/centauri/mint/v1beta1/tx.proto +++ b/proto/centauri/mint/v1beta1/tx.proto @@ -15,8 +15,10 @@ service Msg { option (cosmos.msg.v1.service) = true; rpc UpdateParams(MsgUpdateParams) returns (MsgUpdateParamsResponse); - rpc FundModuleAccount(MsgFundModuleAccount) returns (MsgFundModuleAccountResponse); - rpc AddAccountToFundModuleSet(MsgAddAccountToFundModuleSet) returns (MsgAddAccountToFundModuleSetResponse); + rpc FundModuleAccount(MsgFundModuleAccount) + returns (MsgFundModuleAccountResponse); + rpc AddAccountToFundModuleSet(MsgAddAccountToFundModuleSet) + returns (MsgAddAccountToFundModuleSetResponse); } // MsgUpdateParams is the Msg/UpdateParams request type. @@ -24,15 +26,17 @@ service Msg { // Since: cosmos-sdk 0.47 message MsgUpdateParams { option (cosmos.msg.v1.signer) = "authority"; - option (amino.name) = "centauri/x/mint/MsgUpdateParams"; + option (amino.name) = "centauri/x/mint/MsgUpdateParams"; - // authority is the address that controls the module (defaults to x/gov unless overwritten). 
- string authority = 1 [(cosmos_proto.scalar) = "cosmos.AddressString"]; + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + string authority = 1 [ (cosmos_proto.scalar) = "cosmos.AddressString" ]; // params defines the x/mint parameters to update. // // NOTE: All parameters must be supplied. - Params params = 2 [(gogoproto.nullable) = false, (amino.dont_omitempty) = true]; + Params params = 2 + [ (gogoproto.nullable) = false, (amino.dont_omitempty) = true ]; } // MsgUpdateParamsResponse defines the response structure for executing a @@ -42,8 +46,8 @@ message MsgUpdateParams { message MsgUpdateParamsResponse {} message MsgFundModuleAccount { - string from_address = 1; - repeated cosmos.base.v1beta1.Coin amount = 2 [ + string from_address = 1; + repeated cosmos.base.v1beta1.Coin amount = 2 [ (gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"amount\"", (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.Coins" @@ -54,9 +58,11 @@ message MsgFundModuleAccountResponse {} // MsgAddAccountToFundModuleSet add account in to allowed fund module set message MsgAddAccountToFundModuleSet { - // authority is the address that controls the module (defaults to x/gov unless overwritten). - string authority = 1[ (gogoproto.moretags) = "yaml:\"authority\"" ]; - string allowed_address = 2 [ (gogoproto.moretags) = "yaml:\"allowed_address\"" ]; + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; + string allowed_address = 2 + [ (gogoproto.moretags) = "yaml:\"allowed_address\"" ]; } message MsgAddAccountToFundModuleSetResponse {} diff --git a/proto/centauri/ratelimit/v1beta1/epoch.proto b/proto/centauri/ratelimit/v1beta1/epoch.proto new file mode 100644 index 000000000..8f0bf2669 --- /dev/null +++ b/proto/centauri/ratelimit/v1beta1/epoch.proto @@ -0,0 +1,65 @@ +syntax = "proto3"; +package centauri.ratelimit.v1beta1; + +import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "x/ratelimit/types"; + +message EpochInfo { + // identifier is a unique reference to this particular timer. + string identifier = 1; + // start_time is the time at which the timer first ever ticks. + // If start_time is in the future, the epoch will not begin until the start + // time. + google.protobuf.Timestamp start_time = 2 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"start_time\"" + ]; + // duration is the time in between epoch ticks. + // In order for intended behavior to be met, duration should + // be greater than the chains expected block time. + // Duration must be non-zero. + google.protobuf.Duration duration = 3 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true, + (gogoproto.jsontag) = "duration,omitempty", + (gogoproto.moretags) = "yaml:\"duration\"" + ]; + // current_epoch is the current epoch number, or in other words, + // how many times has the timer 'ticked'. + // The first tick (current_epoch=1) is defined as + // the first block whose blocktime is greater than the EpochInfo start_time. + int64 current_epoch = 4; + // current_epoch_start_time describes the start time of the current timer + // interval. 
The interval is (current_epoch_start_time, + // current_epoch_start_time + duration] When the timer ticks, this is set to + // current_epoch_start_time = last_epoch_start_time + duration only one timer + // tick for a given identifier can occur per block. + // + // NOTE! The current_epoch_start_time may diverge significantly from the + // wall-clock time the epoch began at. Wall-clock time of epoch start may be + // >> current_epoch_start_time. Suppose current_epoch_start_time = 10, + // duration = 5. Suppose the chain goes offline at t=14, and comes back online + // at t=30, and produces blocks at every successive time. (t=31, 32, etc.) + // * The t=30 block will start the epoch for (10, 15] + // * The t=31 block will start the epoch for (15, 20] + // * The t=32 block will start the epoch for (20, 25] + // * The t=33 block will start the epoch for (25, 30] + // * The t=34 block will start the epoch for (30, 35] + // * The **t=36** block will start the epoch for (35, 40] + google.protobuf.Timestamp current_epoch_start_time = 5 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"current_epoch_start_time\"" + ]; + // epoch_counting_started is a boolean, that indicates whether this + // epoch timer has began yet. + bool epoch_counting_started = 6; + reserved 7; + // current_epoch_start_height is the block height at which the current epoch + // started. (The block height at which the timer last ticked) + int64 current_epoch_start_height = 8; +} \ No newline at end of file diff --git a/proto/centauri/ratelimit/v1beta1/genesis.proto b/proto/centauri/ratelimit/v1beta1/genesis.proto new file mode 100644 index 000000000..091dcb855 --- /dev/null +++ b/proto/centauri/ratelimit/v1beta1/genesis.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; +package centauri.ratelimit.v1beta1; + +import "gogoproto/gogo.proto"; +import "centauri/ratelimit/v1beta1/params.proto"; +import "centauri/ratelimit/v1beta1/ratelimit.proto"; +import "centauri/ratelimit/v1beta1/epoch.proto"; + +option go_package = "x/ratelimit/types"; + +// GenesisState defines the ratelimit module's genesis state. +message GenesisState { + Params params = 1 [ + (gogoproto.moretags) = "yaml:\"params\"", + (gogoproto.nullable) = false + ]; + + repeated RateLimit rate_limits = 2 [ + (gogoproto.moretags) = "yaml:\"rate_limits\"", + (gogoproto.nullable) = false + ]; + + repeated WhitelistedAddressPair whitelisted_address_pairs = 3 [ + (gogoproto.moretags) = "yaml:\"whitelisted_address_pairs\"", + (gogoproto.nullable) = false + ]; + + repeated string pending_send_packet_sequence_numbers = 4; + + repeated EpochInfo epochs = 5 [ (gogoproto.nullable) = false ]; +} diff --git a/proto/centauri/ratelimit/v1beta1/params.proto b/proto/centauri/ratelimit/v1beta1/params.proto new file mode 100644 index 000000000..22186f8ac --- /dev/null +++ b/proto/centauri/ratelimit/v1beta1/params.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package centauri.ratelimit.v1beta1; + +option go_package = "x/ratelimit/types"; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +// Params holds parameters for the mint module. 
+message Params {} diff --git a/proto/centauri/ratelimit/v1beta1/query.proto b/proto/centauri/ratelimit/v1beta1/query.proto new file mode 100644 index 000000000..7826edc2f --- /dev/null +++ b/proto/centauri/ratelimit/v1beta1/query.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; +package centauri.ratelimit.v1beta1; + +import "centauri/ratelimit/v1beta1/ratelimit.proto"; +import "google/api/annotations.proto"; +import "gogoproto/gogo.proto"; + +option go_package = "x/ratelimit/types"; + +// Query defines the gRPC querier service. +service Query { + rpc AllRateLimits(QueryAllRateLimitsRequest) + returns (QueryAllRateLimitsResponse) { + option (google.api.http).get = "/centauri/ratelimit/ratelimits"; + } + rpc RateLimit(QueryRateLimitRequest) returns (QueryRateLimitResponse) { + option (google.api.http).get = + "/centauri/ratelimit/ratelimit/{channel_id}/by_denom"; + } + rpc RateLimitsByChainId(QueryRateLimitsByChainIdRequest) + returns (QueryRateLimitsByChainIdResponse) { + option (google.api.http).get = "/centauri/ratelimit/ratelimits/{chain_id}"; + } + rpc RateLimitsByChannelId(QueryRateLimitsByChannelIdRequest) + returns (QueryRateLimitsByChannelIdResponse) { + option (google.api.http).get = + "/centauri/ratelimit/ratelimits/{channel_id}"; + } + rpc AllWhitelistedAddresses(QueryAllWhitelistedAddressesRequest) + returns (QueryAllWhitelistedAddressesResponse) { + option (google.api.http).get = "/centauri/ratelimit/whitelisted_addresses"; + } +} + +message QueryAllRateLimitsRequest {} +message QueryAllRateLimitsResponse { + repeated RateLimit rate_limits = 1 [ (gogoproto.nullable) = false ]; +} + +message QueryRateLimitRequest { + string denom = 1; + string channel_id = 2; +} +message QueryRateLimitResponse { RateLimit rate_limit = 1; } + +message QueryRateLimitsByChainIdRequest { string chain_id = 1; } +message QueryRateLimitsByChainIdResponse { + repeated RateLimit rate_limits = 1 [ (gogoproto.nullable) = false ]; +} + +message QueryRateLimitsByChannelIdRequest { string channel_id = 1; } +message QueryRateLimitsByChannelIdResponse { + repeated RateLimit rate_limits = 1 [ (gogoproto.nullable) = false ]; +} + +message QueryAllWhitelistedAddressesRequest {} +message QueryAllWhitelistedAddressesResponse { + repeated WhitelistedAddressPair address_pairs = 1 + [ (gogoproto.nullable) = false ]; +} \ No newline at end of file diff --git a/proto/centauri/ratelimit/v1beta1/ratelimit.proto b/proto/centauri/ratelimit/v1beta1/ratelimit.proto new file mode 100644 index 000000000..26a2bb0f3 --- /dev/null +++ b/proto/centauri/ratelimit/v1beta1/ratelimit.proto @@ -0,0 +1,56 @@ +syntax = "proto3"; +package centauri.ratelimit.v1beta1; + +import "gogoproto/gogo.proto"; + +option go_package = "x/ratelimit/types"; + +enum PacketDirection { + option (gogoproto.goproto_enum_prefix) = false; + + PACKET_SEND = 0; + PACKET_RECV = 1; +} + +message Path { + string denom = 1; + string channel_id = 2; +} + +message Quota { + string max_percent_send = 1 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string max_percent_recv = 2 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + uint64 duration_hours = 3; +} + +message Flow { + string inflow = 1 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string outflow = 2 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string channel_value = 3 [ + 
(gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; +} + +message RateLimit { + Path path = 1; + Quota quota = 2; + Flow flow = 3; +} + +message WhitelistedAddressPair { + string sender = 1; + string receiver = 2; +} diff --git a/proto/centauri/ratelimit/v1beta1/tx.proto b/proto/centauri/ratelimit/v1beta1/tx.proto new file mode 100755 index 000000000..4012cb04d --- /dev/null +++ b/proto/centauri/ratelimit/v1beta1/tx.proto @@ -0,0 +1,81 @@ +syntax = "proto3"; +package centauri.ratelimit.v1beta1; + +import "gogoproto/gogo.proto"; +import "cosmos/msg/v1/msg.proto"; + +option go_package = "x/ratelimit/types"; + +// Msg defines the transfer middleware Msg service. +service Msg { + rpc AddTransferRateLimit(MsgAddRateLimit) returns (MsgAddRateLimitResponse); + rpc UpdateTransferRateLimit(MsgUpdateRateLimit) returns (MsgUpdateRateLimitResponse); + rpc RemoveTransferRateLimit(MsgRemoveRateLimit) returns (MsgRemoveRateLimitResponse); + rpc ResetTransferRateLimit(MsgResetRateLimit) returns (MsgResetRateLimitResponse); +} + +message MsgAddRateLimit { + option (cosmos.msg.v1.signer) = "authority"; + + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; + string denom = 2; + string channel_id = 3; + string max_percent_send = 4 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string max_percent_recv = 5 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + uint64 duration_hours = 6; +} + +message MsgAddRateLimitResponse {} + +message MsgUpdateRateLimit { + option (cosmos.msg.v1.signer) = "authority"; + + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; + string denom = 2; + string channel_id = 3; + string max_percent_send = 4 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string max_percent_recv = 5 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + uint64 duration_hours = 6; +} + +message MsgUpdateRateLimitResponse {} + +message MsgRemoveRateLimit { + option (cosmos.msg.v1.signer) = "authority"; + + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; + string denom = 2; + string channel_id = 3; +} + +message MsgRemoveRateLimitResponse {} + +message MsgResetRateLimit { + option (cosmos.msg.v1.signer) = "authority"; + + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). 
+ string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; + string denom = 2; + string channel_id = 3; +} + +message MsgResetRateLimitResponse {} diff --git a/proto/centauri/transfermiddleware/v1beta1/genesis.proto b/proto/centauri/transfermiddleware/v1beta1/genesis.proto index fe9490b12..ebd341cac 100644 --- a/proto/centauri/transfermiddleware/v1beta1/genesis.proto +++ b/proto/centauri/transfermiddleware/v1beta1/genesis.proto @@ -2,23 +2,16 @@ syntax = "proto3"; package centauri.transfermiddleware.v1beta1; import "gogoproto/gogo.proto"; +import "centauri/transfermiddleware/v1beta1/params.proto"; +import "centauri/transfermiddleware/v1beta1/parachain_token_info.proto"; option go_package = "x/transfermiddleware/types"; // GenesisState defines the module various parameters when first // initialized message GenesisState { - repeated ParachainIBCTokenInfo token_infos = 1 [ (gogoproto.nullable) = false ]; + repeated ParachainIBCTokenInfo token_infos = 1 + [ (gogoproto.nullable) = false ]; + Params params = 2 [ (gogoproto.nullable) = false ]; } - -// ParachainIBCTokenInfo represents information about transferable IBC tokens from Parachain. -message ParachainIBCTokenInfo{ - // ibc_denom is the denomination of the ibced token transferred from the dotsama chain. - string ibc_denom = 1 [ (gogoproto.moretags) = "yaml:\"ibc_denom\"" ]; - // channel_id is source channel in IBC connection from centauri chain - string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; - // native denom is new native minted denom in centauri chain. - string native_denom = 3 [ (gogoproto.moretags) = "yaml:\"native_denom\"" ]; - // asset id is the id of the asset on Picasso - string asset_id = 4 [ (gogoproto.moretags) = "yaml:\"asset_id\"" ]; -} \ No newline at end of file + diff --git a/proto/centauri/transfermiddleware/v1beta1/parachain_token_info.proto b/proto/centauri/transfermiddleware/v1beta1/parachain_token_info.proto new file mode 100644 index 000000000..f570c3f9b --- /dev/null +++ b/proto/centauri/transfermiddleware/v1beta1/parachain_token_info.proto @@ -0,0 +1,33 @@ +syntax = "proto3"; +package centauri.transfermiddleware.v1beta1; + +import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "x/transfermiddleware/types"; + +// ParachainIBCTokenInfo represents information about transferable IBC tokens +// from Parachain. +message ParachainIBCTokenInfo { + // ibc_denom is the denomination of the ibced token transferred from the + // dotsama chain. + string ibc_denom = 1 [ (gogoproto.moretags) = "yaml:\"ibc_denom\"" ]; + // channel_id is source channel in IBC connection from centauri chain + string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; + // native denom is new native minted denom in centauri chain. + string native_denom = 3 [ (gogoproto.moretags) = "yaml:\"native_denom\"" ]; + // asset id is the id of the asset on Picasso + string asset_id = 4 [ (gogoproto.moretags) = "yaml:\"asset_id\"" ]; +} + +message RemoveParachainIBCTokenInfo { + // native denom is new native minted denom in centauri chain. + string native_denom = 1 [ (gogoproto.moretags) = "yaml:\"native_denom\"" ]; + // + // remove_time is the time at which the parachain token info will be removed. 
+ google.protobuf.Timestamp remove_time = 2 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"start_time\"" + ]; +} \ No newline at end of file diff --git a/proto/centauri/transfermiddleware/v1beta1/params.proto b/proto/centauri/transfermiddleware/v1beta1/params.proto new file mode 100644 index 000000000..d93818302 --- /dev/null +++ b/proto/centauri/transfermiddleware/v1beta1/params.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; +package centauri.transfermiddleware.v1beta1; + +import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; + +option go_package = "x/transfermiddleware/types"; + +// Params holds parameters for the mint module. +message Params { + google.protobuf.Duration duration = 1 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true, + (gogoproto.jsontag) = "duration,omitempty", + (gogoproto.moretags) = "yaml:\"duration\"" + ]; +} \ No newline at end of file diff --git a/proto/centauri/transfermiddleware/v1beta1/query.proto b/proto/centauri/transfermiddleware/v1beta1/query.proto index 1d30ca5b4..fc12b0249 100644 --- a/proto/centauri/transfermiddleware/v1beta1/query.proto +++ b/proto/centauri/transfermiddleware/v1beta1/query.proto @@ -9,33 +9,38 @@ option go_package = "x/transfermiddleware/types"; // Query provides defines the gRPC querier service. service Query { // ParaTokenInfo queries all token info of a native denom. - rpc ParaTokenInfo(QueryParaTokenInfoRequest) returns (QueryParaTokenInfoResponse) { + rpc ParaTokenInfo(QueryParaTokenInfoRequest) + returns (QueryParaTokenInfoResponse) { option (google.api.http).get = "/centauri/paratokeninfo"; } - rpc EscrowAddress(QueryEscrowAddressRequest) returns (QueryEscrowAddressResponse) { + rpc EscrowAddress(QueryEscrowAddressRequest) + returns (QueryEscrowAddressResponse) { option (google.api.http).get = "/centauri/escrowaddress"; } } -// message QueryEscrowAddressRequest +// message QueryEscrowAddressRequest message QueryEscrowAddressRequest { - string channel_id = 1 [(gogoproto.moretags) = "yaml:\"channel_id\""]; + string channel_id = 1 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; } -// QueryEscrowAddressResponse +// QueryEscrowAddressResponse message QueryEscrowAddressResponse { - string escrow_address = 1 [(gogoproto.moretags) = "yaml:\"escrow_address\""]; + string escrow_address = 1 + [ (gogoproto.moretags) = "yaml:\"escrow_address\"" ]; } -// QueryParaTokenInfoRequest is the request type for the Query/Params RPC method. +// QueryParaTokenInfoRequest is the request type for the Query/Params RPC +// method. message QueryParaTokenInfoRequest { - string native_denom = 1 [(gogoproto.moretags) = "yaml:\"native_denom\""]; + string native_denom = 1 [ (gogoproto.moretags) = "yaml:\"native_denom\"" ]; } -// QueryParaTokenInfoResponse is the response type for the Query/ParaTokenInfo RPC method. +// QueryParaTokenInfoResponse is the response type for the Query/ParaTokenInfo +// RPC method. 
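+// The response echoes the stored mapping for the queried native denom: the IBC
+// voucher denom, the source channel on the Centauri side, the minted native
+// denom, and the asset id on Picasso.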
message QueryParaTokenInfoResponse { - string ibc_denom = 1 [ (gogoproto.moretags) = "yaml:\"ibc_denom\"" ]; - string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; - string native_denom = 3 [ (gogoproto.moretags) = "yaml:\"native_denom\"" ]; - string asset_id = 4 [ (gogoproto.moretags) = "yaml:\"asset_id\"" ]; + string ibc_denom = 1 [ (gogoproto.moretags) = "yaml:\"ibc_denom\"" ]; + string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; + string native_denom = 3 [ (gogoproto.moretags) = "yaml:\"native_denom\"" ]; + string asset_id = 4 [ (gogoproto.moretags) = "yaml:\"asset_id\"" ]; } diff --git a/proto/centauri/transfermiddleware/v1beta1/tx.proto b/proto/centauri/transfermiddleware/v1beta1/tx.proto index 0d42610c3..a11e679c9 100644 --- a/proto/centauri/transfermiddleware/v1beta1/tx.proto +++ b/proto/centauri/transfermiddleware/v1beta1/tx.proto @@ -8,50 +8,53 @@ option go_package = "x/transfermiddleware/types"; // Msg defines the transfer middleware Msg service. service Msg { - rpc AddParachainIBCTokenInfo(MsgAddParachainIBCTokenInfo) returns (MsgAddParachainIBCTokenInfoResponse); - rpc RemoveParachainIBCTokenInfo(MsgRemoveParachainIBCTokenInfo) returns (MsgRemoveParachainIBCTokenInfoResponse); - rpc AddRlyAddress(MsgAddRlyAddress) returns (MsgAddRlyAddressResponse); + rpc AddParachainIBCTokenInfo(MsgAddParachainIBCTokenInfo) + returns (MsgAddParachainIBCTokenInfoResponse); + rpc RemoveParachainIBCTokenInfo(MsgRemoveParachainIBCTokenInfo) + returns (MsgRemoveParachainIBCTokenInfoResponse); + rpc AddRlyAddress(MsgAddRlyAddress) returns (MsgAddRlyAddressResponse); } // MsgAddParachainInfo represents a message to add new parachain info. message MsgAddParachainIBCTokenInfo { - option (cosmos.msg.v1.signer) = "authority"; - - // authority is the address that controls the module (defaults to x/gov unless overwritten). - string authority = 1[ (gogoproto.moretags) = "yaml:\"authority\"" ];; - - string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; - string ibc_denom = 3 [ (gogoproto.moretags) = "yaml:\"ibc_denom\"" ]; - string native_denom = 4 [ (gogoproto.moretags) = "yaml:\"native_denom\"" ]; - string asset_id = 5 [ (gogoproto.moretags) = "yaml:\"asset_id\"" ]; -} - -message MsgAddParachainIBCTokenInfoResponse { + option (cosmos.msg.v1.signer) = "authority"; + + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; + string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; + string ibc_denom = 3 [ (gogoproto.moretags) = "yaml:\"ibc_denom\"" ]; + string native_denom = 4 [ (gogoproto.moretags) = "yaml:\"native_denom\"" ]; + string asset_id = 5 [ (gogoproto.moretags) = "yaml:\"asset_id\"" ]; } +message MsgAddParachainIBCTokenInfoResponse {} -// MsgRemoveParachainIBCTokenInfo represents a message to remove new parachain info. +// MsgRemoveParachainIBCTokenInfo represents a message to remove new parachain +// info. message MsgRemoveParachainIBCTokenInfo { - option (cosmos.msg.v1.signer) = "authority"; + option (cosmos.msg.v1.signer) = "authority"; - // authority is the address that controls the module (defaults to x/gov unless overwritten). - string authority = 1[ (gogoproto.moretags) = "yaml:\"authority\"" ];; + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). 
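+  // The remaining fields register the mapping from the parachain's IBC voucher
+  // denom (ibc_denom) on the given channel to the newly minted native denom and
+  // its asset id on Picasso.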
+ string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; + ; - string native_denom = 2 [ (gogoproto.moretags) = "yaml:\"ibc_denom\"" ]; + string native_denom = 2 [ (gogoproto.moretags) = "yaml:\"ibc_denom\"" ]; } -message MsgRemoveParachainIBCTokenInfoResponse { -} +message MsgRemoveParachainIBCTokenInfoResponse {} // MsgAddRlyAddress represents a message to add new rly address to allow list message MsgAddRlyAddress { - option (cosmos.msg.v1.signer) = "authority"; + option (cosmos.msg.v1.signer) = "authority"; - // authority is the address that controls the module (defaults to x/gov unless overwritten). - string authority = 1[ (gogoproto.moretags) = "yaml:\"authority\"" ];; + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; + ; - string rly_address = 2 [ (gogoproto.moretags) = "yaml:\"rly_address\"" ]; + string rly_address = 2 [ (gogoproto.moretags) = "yaml:\"rly_address\"" ]; } -message MsgAddRlyAddressResponse { -} \ No newline at end of file +message MsgAddRlyAddressResponse {} \ No newline at end of file diff --git a/tests/ibc-hooks/bytecode/counter.wasm b/tests/ibc-hooks/bytecode/counter.wasm new file mode 100644 index 000000000..938171ef8 Binary files /dev/null and b/tests/ibc-hooks/bytecode/counter.wasm differ diff --git a/tests/ibc-hooks/bytecode/echo.wasm b/tests/ibc-hooks/bytecode/echo.wasm new file mode 100644 index 000000000..4feb4554e Binary files /dev/null and b/tests/ibc-hooks/bytecode/echo.wasm differ diff --git a/x/ibc-hooks/client/cli/query.go b/x/ibc-hooks/client/cli/query.go new file mode 100644 index 000000000..0350649e4 --- /dev/null +++ b/x/ibc-hooks/client/cli/query.go @@ -0,0 +1,76 @@ +package cli + +import ( + "fmt" + "strings" + + "github.com/cosmos/cosmos-sdk/client/flags" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/version" + "github.com/notional-labs/centauri/v4/x/ibc-hooks/keeper" + "github.com/spf13/cobra" + + "github.com/notional-labs/centauri/v4/x/ibc-hooks/types" +) + +func indexRunCmd(cmd *cobra.Command, args []string) error { + usageTemplate := `Usage:{{if .HasAvailableSubCommands}} + {{.CommandPath}} [command]{{end}} + +{{if .HasAvailableSubCommands}}Available Commands:{{range .Commands}}{{if .IsAvailableCommand}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}} + +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +` + cmd.SetUsageTemplate(usageTemplate) + return cmd.Help() +} + +// GetQueryCmd returns the cli query commands for this module. +func GetQueryCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: indexRunCmd, + } + + cmd.AddCommand( + GetCmdWasmSender(), + ) + return cmd +} + +// GetCmdPoolParams return pool params. +func GetCmdWasmSender() *cobra.Command { + cmd := &cobra.Command{ + Use: "wasm-sender ", + Short: "Generate the local address for a wasm hooks sender", + Long: strings.TrimSpace( + fmt.Sprintf(`Generate the local address for a wasm hooks sender. 
+Example: +$ %s query ibc-hooks wasm-hooks-sender channel-42 juno12smx2wdlyttvyzvzg54y2vnqwq2qjatezqwqxu +`, + version.AppName, + ), + ), + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + channelID := args[0] + originalSender := args[1] + // ToDo: Make this flexible as an arg + prefix := sdk.GetConfig().GetBech32AccountAddrPrefix() + senderBech32, err := keeper.DeriveIntermediateSender(channelID, originalSender, prefix) + if err != nil { + return err + } + fmt.Println(senderBech32) + return nil + }, + } + + flags.AddQueryFlagsToCmd(cmd) + + return cmd +} diff --git a/x/ibc-hooks/hooks.go b/x/ibc-hooks/hooks.go new file mode 100644 index 000000000..2a2fc2cd3 --- /dev/null +++ b/x/ibc-hooks/hooks.go @@ -0,0 +1,171 @@ +package ibc_hooks + +import ( + // external libraries + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + + // ibc-go + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported" +) + +type Hooks interface{} + +type OnChanOpenInitOverrideHooks interface { + OnChanOpenInitOverride(im IBCMiddleware, ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID string, channelID string, channelCap *capabilitytypes.Capability, counterparty channeltypes.Counterparty, version string) (string, error) +} +type OnChanOpenInitBeforeHooks interface { + OnChanOpenInitBeforeHook(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID string, channelID string, channelCap *capabilitytypes.Capability, counterparty channeltypes.Counterparty, version string) +} +type OnChanOpenInitAfterHooks interface { + OnChanOpenInitAfterHook(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID string, channelID string, channelCap *capabilitytypes.Capability, counterparty channeltypes.Counterparty, version string, finalVersion string, err error) +} + +// OnChanOpenTry Hooks +type OnChanOpenTryOverrideHooks interface { + OnChanOpenTryOverride(im IBCMiddleware, ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID, channelID string, channelCap *capabilitytypes.Capability, counterparty channeltypes.Counterparty, counterpartyVersion string) (string, error) +} +type OnChanOpenTryBeforeHooks interface { + OnChanOpenTryBeforeHook(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID, channelID string, channelCap *capabilitytypes.Capability, counterparty channeltypes.Counterparty, counterpartyVersion string) +} +type OnChanOpenTryAfterHooks interface { + OnChanOpenTryAfterHook(ctx sdk.Context, order channeltypes.Order, connectionHops []string, portID, channelID string, channelCap *capabilitytypes.Capability, counterparty channeltypes.Counterparty, counterpartyVersion string, version string, err error) +} + +// OnChanOpenAck Hooks +type OnChanOpenAckOverrideHooks interface { + OnChanOpenAckOverride(im IBCMiddleware, ctx sdk.Context, portID, channelID string, counterpartyChannelID string, counterpartyVersion string) error +} +type OnChanOpenAckBeforeHooks interface { + OnChanOpenAckBeforeHook(ctx sdk.Context, portID, channelID string, counterpartyChannelID string, counterpartyVersion string) +} +type OnChanOpenAckAfterHooks interface { + OnChanOpenAckAfterHook(ctx sdk.Context, portID, channelID string, counterpartyChannelID string, counterpartyVersion string, err error) +} + +// 
OnChanOpenConfirm Hooks +type OnChanOpenConfirmOverrideHooks interface { + OnChanOpenConfirmOverride(im IBCMiddleware, ctx sdk.Context, portID, channelID string) error +} +type OnChanOpenConfirmBeforeHooks interface { + OnChanOpenConfirmBeforeHook(ctx sdk.Context, portID, channelID string) +} +type OnChanOpenConfirmAfterHooks interface { + OnChanOpenConfirmAfterHook(ctx sdk.Context, portID, channelID string, err error) +} + +// OnChanCloseInit Hooks +type OnChanCloseInitOverrideHooks interface { + OnChanCloseInitOverride(im IBCMiddleware, ctx sdk.Context, portID, channelID string) error +} +type OnChanCloseInitBeforeHooks interface { + OnChanCloseInitBeforeHook(ctx sdk.Context, portID, channelID string) +} +type OnChanCloseInitAfterHooks interface { + OnChanCloseInitAfterHook(ctx sdk.Context, portID, channelID string, err error) +} + +// OnChanCloseConfirm Hooks +type OnChanCloseConfirmOverrideHooks interface { + OnChanCloseConfirmOverride(im IBCMiddleware, ctx sdk.Context, portID, channelID string) error +} +type OnChanCloseConfirmBeforeHooks interface { + OnChanCloseConfirmBeforeHook(ctx sdk.Context, portID, channelID string) +} +type OnChanCloseConfirmAfterHooks interface { + OnChanCloseConfirmAfterHook(ctx sdk.Context, portID, channelID string, err error) +} + +// OnRecvPacket Hooks +type OnRecvPacketOverrideHooks interface { + OnRecvPacketOverride(im IBCMiddleware, ctx sdk.Context, packet channeltypes.Packet, relayer sdk.AccAddress) ibcexported.Acknowledgement +} +type OnRecvPacketBeforeHooks interface { + OnRecvPacketBeforeHook(ctx sdk.Context, packet channeltypes.Packet, relayer sdk.AccAddress) +} +type OnRecvPacketAfterHooks interface { + OnRecvPacketAfterHook(ctx sdk.Context, packet channeltypes.Packet, relayer sdk.AccAddress, ack ibcexported.Acknowledgement) +} + +// OnAcknowledgementPacket Hooks +type OnAcknowledgementPacketOverrideHooks interface { + OnAcknowledgementPacketOverride(im IBCMiddleware, ctx sdk.Context, packet channeltypes.Packet, acknowledgement []byte, relayer sdk.AccAddress) error +} +type OnAcknowledgementPacketBeforeHooks interface { + OnAcknowledgementPacketBeforeHook(ctx sdk.Context, packet channeltypes.Packet, acknowledgement []byte, relayer sdk.AccAddress) +} +type OnAcknowledgementPacketAfterHooks interface { + OnAcknowledgementPacketAfterHook(ctx sdk.Context, packet channeltypes.Packet, acknowledgement []byte, relayer sdk.AccAddress, err error) +} + +// OnTimeoutPacket Hooks +type OnTimeoutPacketOverrideHooks interface { + OnTimeoutPacketOverride(im IBCMiddleware, ctx sdk.Context, packet channeltypes.Packet, relayer sdk.AccAddress) error +} +type OnTimeoutPacketBeforeHooks interface { + OnTimeoutPacketBeforeHook(ctx sdk.Context, packet channeltypes.Packet, relayer sdk.AccAddress) +} +type OnTimeoutPacketAfterHooks interface { + OnTimeoutPacketAfterHook(ctx sdk.Context, packet channeltypes.Packet, relayer sdk.AccAddress, err error) +} + +// SendPacket Hooks +type SendPacketOverrideHooks interface { + SendPacketOverride( + i ICS4Middleware, + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + sourcePort string, + sourceChannel string, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, + data []byte, + ) (sequence uint64, err error) +} +type SendPacketBeforeHooks interface { + SendPacketBeforeHook( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + sourcePort string, + sourceChannel string, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, + data []byte, + ) +} +type SendPacketAfterHooks interface { + 
SendPacketAfterHook( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + sourcePort string, + sourceChannel string, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, + data []byte, + err error, + ) +} + +// WriteAcknowledgement Hooks +type WriteAcknowledgementOverrideHooks interface { + WriteAcknowledgementOverride(i ICS4Middleware, ctx sdk.Context, chanCap *capabilitytypes.Capability, packet ibcexported.PacketI, ack ibcexported.Acknowledgement) error +} +type WriteAcknowledgementBeforeHooks interface { + WriteAcknowledgementBeforeHook(ctx sdk.Context, chanCap *capabilitytypes.Capability, packet ibcexported.PacketI, ack ibcexported.Acknowledgement) +} +type WriteAcknowledgementAfterHooks interface { + WriteAcknowledgementAfterHook(ctx sdk.Context, chanCap *capabilitytypes.Capability, packet ibcexported.PacketI, ack ibcexported.Acknowledgement, err error) +} + +// GetAppVersion Hooks +type GetAppVersionOverrideHooks interface { + GetAppVersionOverride(i ICS4Middleware, ctx sdk.Context, portID, channelID string) (string, bool) +} +type GetAppVersionBeforeHooks interface { + GetAppVersionBeforeHook(ctx sdk.Context, portID, channelID string) +} +type GetAppVersionAfterHooks interface { + GetAppVersionAfterHook(ctx sdk.Context, portID, channelID string, result string, success bool) +} diff --git a/x/ibc-hooks/ibc_module.go b/x/ibc-hooks/ibc_module.go new file mode 100644 index 000000000..0d876f891 --- /dev/null +++ b/x/ibc-hooks/ibc_module.go @@ -0,0 +1,262 @@ +package ibc_hooks + +import ( + // external libraries + + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + + // ibc-go + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types" + ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported" +) + +var _ porttypes.Middleware = &IBCMiddleware{} + +type IBCMiddleware struct { + App porttypes.IBCModule + ICS4Middleware *ICS4Middleware +} + +func NewIBCMiddleware(app porttypes.IBCModule, ics4 *ICS4Middleware) IBCMiddleware { + return IBCMiddleware{ + App: app, + ICS4Middleware: ics4, + } +} + +// OnChanOpenInit implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenInit( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID string, + channelID string, + channelCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + version string, +) (string, error) { + if hook, ok := im.ICS4Middleware.Hooks.(OnChanOpenInitOverrideHooks); ok { + return hook.OnChanOpenInitOverride(im, ctx, order, connectionHops, portID, channelID, channelCap, counterparty, version) + } + + if hook, ok := im.ICS4Middleware.Hooks.(OnChanOpenInitBeforeHooks); ok { + hook.OnChanOpenInitBeforeHook(ctx, order, connectionHops, portID, channelID, channelCap, counterparty, version) + } + + finalVersion, err := im.App.OnChanOpenInit(ctx, order, connectionHops, portID, channelID, channelCap, counterparty, version) + + if hook, ok := im.ICS4Middleware.Hooks.(OnChanOpenInitAfterHooks); ok { + hook.OnChanOpenInitAfterHook(ctx, order, connectionHops, portID, channelID, channelCap, counterparty, version, finalVersion, err) + } + return version, err +} + +// OnChanOpenTry implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenTry( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID, + 
channelID string, + channelCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + counterpartyVersion string, +) (string, error) { + if hook, ok := im.ICS4Middleware.Hooks.(OnChanOpenTryOverrideHooks); ok { + return hook.OnChanOpenTryOverride(im, ctx, order, connectionHops, portID, channelID, channelCap, counterparty, counterpartyVersion) + } + + if hook, ok := im.ICS4Middleware.Hooks.(OnChanOpenTryBeforeHooks); ok { + hook.OnChanOpenTryBeforeHook(ctx, order, connectionHops, portID, channelID, channelCap, counterparty, counterpartyVersion) + } + + version, err := im.App.OnChanOpenTry(ctx, order, connectionHops, portID, channelID, channelCap, counterparty, counterpartyVersion) + + if hook, ok := im.ICS4Middleware.Hooks.(OnChanOpenTryAfterHooks); ok { + hook.OnChanOpenTryAfterHook(ctx, order, connectionHops, portID, channelID, channelCap, counterparty, counterpartyVersion, version, err) + } + return version, err +} + +// OnChanOpenAck implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenAck( + ctx sdk.Context, + portID, + channelID string, + counterpartyChannelID string, + counterpartyVersion string, +) error { + if hook, ok := im.ICS4Middleware.Hooks.(OnChanOpenAckOverrideHooks); ok { + return hook.OnChanOpenAckOverride(im, ctx, portID, channelID, counterpartyChannelID, counterpartyVersion) + } + + if hook, ok := im.ICS4Middleware.Hooks.(OnChanOpenAckBeforeHooks); ok { + hook.OnChanOpenAckBeforeHook(ctx, portID, channelID, counterpartyChannelID, counterpartyVersion) + } + err := im.App.OnChanOpenAck(ctx, portID, channelID, counterpartyChannelID, counterpartyVersion) + if hook, ok := im.ICS4Middleware.Hooks.(OnChanOpenAckAfterHooks); ok { + hook.OnChanOpenAckAfterHook(ctx, portID, channelID, counterpartyChannelID, counterpartyVersion, err) + } + + return err +} + +// OnChanOpenConfirm implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenConfirm( + ctx sdk.Context, + portID, + channelID string, +) error { + if hook, ok := im.ICS4Middleware.Hooks.(OnChanOpenConfirmOverrideHooks); ok { + return hook.OnChanOpenConfirmOverride(im, ctx, portID, channelID) + } + + if hook, ok := im.ICS4Middleware.Hooks.(OnChanOpenConfirmBeforeHooks); ok { + hook.OnChanOpenConfirmBeforeHook(ctx, portID, channelID) + } + err := im.App.OnChanOpenConfirm(ctx, portID, channelID) + if hook, ok := im.ICS4Middleware.Hooks.(OnChanOpenConfirmAfterHooks); ok { + hook.OnChanOpenConfirmAfterHook(ctx, portID, channelID, err) + } + return err +} + +// OnChanCloseInit implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanCloseInit( + ctx sdk.Context, + portID, + channelID string, +) error { + // Here we can remove the limits when a new channel is closed. For now, they can remove them manually on the contract + if hook, ok := im.ICS4Middleware.Hooks.(OnChanCloseInitOverrideHooks); ok { + return hook.OnChanCloseInitOverride(im, ctx, portID, channelID) + } + + if hook, ok := im.ICS4Middleware.Hooks.(OnChanCloseInitBeforeHooks); ok { + hook.OnChanCloseInitBeforeHook(ctx, portID, channelID) + } + err := im.App.OnChanCloseInit(ctx, portID, channelID) + if hook, ok := im.ICS4Middleware.Hooks.(OnChanCloseInitAfterHooks); ok { + hook.OnChanCloseInitAfterHook(ctx, portID, channelID, err) + } + + return err +} + +// OnChanCloseConfirm implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanCloseConfirm( + ctx sdk.Context, + portID, + channelID string, +) error { + // Here we can remove the limits when a new channel is closed. 
For now, they can remove them manually on the contract + if hook, ok := im.ICS4Middleware.Hooks.(OnChanCloseConfirmOverrideHooks); ok { + return hook.OnChanCloseConfirmOverride(im, ctx, portID, channelID) + } + + if hook, ok := im.ICS4Middleware.Hooks.(OnChanCloseConfirmBeforeHooks); ok { + hook.OnChanCloseConfirmBeforeHook(ctx, portID, channelID) + } + err := im.App.OnChanCloseConfirm(ctx, portID, channelID) + if hook, ok := im.ICS4Middleware.Hooks.(OnChanCloseConfirmAfterHooks); ok { + hook.OnChanCloseConfirmAfterHook(ctx, portID, channelID, err) + } + + return err +} + +// OnRecvPacket implements the IBCMiddleware interface +func (im IBCMiddleware) OnRecvPacket( + ctx sdk.Context, + packet channeltypes.Packet, + relayer sdk.AccAddress, +) ibcexported.Acknowledgement { + if hook, ok := im.ICS4Middleware.Hooks.(OnRecvPacketOverrideHooks); ok { + return hook.OnRecvPacketOverride(im, ctx, packet, relayer) + } + + if hook, ok := im.ICS4Middleware.Hooks.(OnRecvPacketBeforeHooks); ok { + hook.OnRecvPacketBeforeHook(ctx, packet, relayer) + } + + ack := im.App.OnRecvPacket(ctx, packet, relayer) + + if hook, ok := im.ICS4Middleware.Hooks.(OnRecvPacketAfterHooks); ok { + hook.OnRecvPacketAfterHook(ctx, packet, relayer, ack) + } + + return ack +} + +// OnAcknowledgementPacket implements the IBCMiddleware interface +func (im IBCMiddleware) OnAcknowledgementPacket( + ctx sdk.Context, + packet channeltypes.Packet, + acknowledgement []byte, + relayer sdk.AccAddress, +) error { + if hook, ok := im.ICS4Middleware.Hooks.(OnAcknowledgementPacketOverrideHooks); ok { + return hook.OnAcknowledgementPacketOverride(im, ctx, packet, acknowledgement, relayer) + } + if hook, ok := im.ICS4Middleware.Hooks.(OnAcknowledgementPacketBeforeHooks); ok { + hook.OnAcknowledgementPacketBeforeHook(ctx, packet, acknowledgement, relayer) + } + + err := im.App.OnAcknowledgementPacket(ctx, packet, acknowledgement, relayer) + + if hook, ok := im.ICS4Middleware.Hooks.(OnAcknowledgementPacketAfterHooks); ok { + hook.OnAcknowledgementPacketAfterHook(ctx, packet, acknowledgement, relayer, err) + } + + return err +} + +// OnTimeoutPacket implements the IBCMiddleware interface +func (im IBCMiddleware) OnTimeoutPacket( + ctx sdk.Context, + packet channeltypes.Packet, + relayer sdk.AccAddress, +) error { + if hook, ok := im.ICS4Middleware.Hooks.(OnTimeoutPacketOverrideHooks); ok { + return hook.OnTimeoutPacketOverride(im, ctx, packet, relayer) + } + + if hook, ok := im.ICS4Middleware.Hooks.(OnTimeoutPacketBeforeHooks); ok { + hook.OnTimeoutPacketBeforeHook(ctx, packet, relayer) + } + err := im.App.OnTimeoutPacket(ctx, packet, relayer) + if hook, ok := im.ICS4Middleware.Hooks.(OnTimeoutPacketAfterHooks); ok { + hook.OnTimeoutPacketAfterHook(ctx, packet, relayer, err) + } + + return err +} + +// SendPacket implements the ICS4 Wrapper interface +func (im IBCMiddleware) SendPacket( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + sourcePort, sourceChannel string, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, + data []byte, +) (sequence uint64, err error) { + return im.ICS4Middleware.SendPacket(ctx, chanCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) +} + +// WriteAcknowledgement implements the ICS4 Wrapper interface +func (im IBCMiddleware) WriteAcknowledgement( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + packet ibcexported.PacketI, + ack ibcexported.Acknowledgement, +) error { + return im.ICS4Middleware.WriteAcknowledgement(ctx, chanCap, packet, ack) +} + +func (im 
IBCMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + return im.ICS4Middleware.GetAppVersion(ctx, portID, channelID) +} diff --git a/x/ibc-hooks/ics4_middleware.go b/x/ibc-hooks/ics4_middleware.go new file mode 100644 index 000000000..ab45dcb7e --- /dev/null +++ b/x/ibc-hooks/ics4_middleware.go @@ -0,0 +1,86 @@ +package ibc_hooks + +import ( + // external libraries + + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + + // ibc-go + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types" + ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported" +) + +var _ porttypes.ICS4Wrapper = &ICS4Middleware{} + +type ICS4Middleware struct { + channel porttypes.ICS4Wrapper + + // Hooks + Hooks Hooks +} + +func NewICS4Middleware(channel porttypes.ICS4Wrapper, hooks Hooks) ICS4Middleware { + return ICS4Middleware{ + channel: channel, + Hooks: hooks, + } +} + +func (i ICS4Middleware) SendPacket( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + sourcePort, sourceChannel string, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, + data []byte, +) (sequence uint64, err error) { + if hook, ok := i.Hooks.(SendPacketOverrideHooks); ok { + return hook.SendPacketOverride(i, ctx, chanCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) + } + + if hook, ok := i.Hooks.(SendPacketBeforeHooks); ok { + hook.SendPacketBeforeHook(ctx, chanCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) + } + + seq, err := i.channel.SendPacket(ctx, chanCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) + + if hook, ok := i.Hooks.(SendPacketAfterHooks); ok { + hook.SendPacketAfterHook(ctx, chanCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data, err) + } + + return seq, err +} + +func (i ICS4Middleware) WriteAcknowledgement(ctx sdk.Context, chanCap *capabilitytypes.Capability, packet ibcexported.PacketI, ack ibcexported.Acknowledgement) error { + if hook, ok := i.Hooks.(WriteAcknowledgementOverrideHooks); ok { + return hook.WriteAcknowledgementOverride(i, ctx, chanCap, packet, ack) + } + + if hook, ok := i.Hooks.(WriteAcknowledgementBeforeHooks); ok { + hook.WriteAcknowledgementBeforeHook(ctx, chanCap, packet, ack) + } + err := i.channel.WriteAcknowledgement(ctx, chanCap, packet, ack) + if hook, ok := i.Hooks.(WriteAcknowledgementAfterHooks); ok { + hook.WriteAcknowledgementAfterHook(ctx, chanCap, packet, ack, err) + } + + return err +} + +func (i ICS4Middleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + if hook, ok := i.Hooks.(GetAppVersionOverrideHooks); ok { + return hook.GetAppVersionOverride(i, ctx, portID, channelID) + } + + if hook, ok := i.Hooks.(GetAppVersionBeforeHooks); ok { + hook.GetAppVersionBeforeHook(ctx, portID, channelID) + } + version, err := i.channel.GetAppVersion(ctx, portID, channelID) + if hook, ok := i.Hooks.(GetAppVersionAfterHooks); ok { + hook.GetAppVersionAfterHook(ctx, portID, channelID, version, err) + } + + return version, err +} diff --git a/x/ibc-hooks/keeper/keeper.go b/x/ibc-hooks/keeper/keeper.go new file mode 100644 index 000000000..26edf4b4a --- /dev/null +++ b/x/ibc-hooks/keeper/keeper.go @@ -0,0 +1,60 @@ +package keeper + +import ( + "fmt" + + "github.com/cometbft/cometbft/libs/log" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" 
+ "github.com/cosmos/cosmos-sdk/types/address" + "github.com/notional-labs/centauri/v4/x/ibc-hooks/types" +) + +type ( + Keeper struct { + storeKey storetypes.StoreKey + } +) + +// NewKeeper returns a new instance of the x/ibchooks keeper +func NewKeeper( + storeKey storetypes.StoreKey, +) Keeper { + return Keeper{ + storeKey: storeKey, + } +} + +// Logger returns a logger for the x/tokenfactory module +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +func GetPacketKey(channel string, packetSequence uint64) []byte { + return []byte(fmt.Sprintf("%s::%d", channel, packetSequence)) +} + +// StorePacketCallback stores which contract will be listening for the ack or timeout of a packet +func (k Keeper) StorePacketCallback(ctx sdk.Context, channel string, packetSequence uint64, contract string) { + store := ctx.KVStore(k.storeKey) + store.Set(GetPacketKey(channel, packetSequence), []byte(contract)) +} + +// GetPacketCallback returns the bech32 addr of the contract that is expecting a callback from a packet +func (k Keeper) GetPacketCallback(ctx sdk.Context, channel string, packetSequence uint64) string { + store := ctx.KVStore(k.storeKey) + return string(store.Get(GetPacketKey(channel, packetSequence))) +} + +// DeletePacketCallback deletes the callback from storage once it has been processed +func (k Keeper) DeletePacketCallback(ctx sdk.Context, channel string, packetSequence uint64) { + store := ctx.KVStore(k.storeKey) + store.Delete(GetPacketKey(channel, packetSequence)) +} + +func DeriveIntermediateSender(channel, originalSender, bech32Prefix string) (string, error) { + senderStr := fmt.Sprintf("%s/%s", channel, originalSender) + senderHash32 := address.Hash(types.SenderPrefix, []byte(senderStr)) + sender := sdk.AccAddress(senderHash32[:]) + return sdk.Bech32ifyAddressBytes(bech32Prefix, sender) +} diff --git a/x/ibc-hooks/module.go b/x/ibc-hooks/module.go new file mode 100644 index 000000000..7a03b9407 --- /dev/null +++ b/x/ibc-hooks/module.go @@ -0,0 +1,122 @@ +package ibc_hooks + +import ( + "encoding/json" + + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/gorilla/mux" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/spf13/cobra" + + "github.com/notional-labs/centauri/v4/x/ibc-hooks/client/cli" + "github.com/notional-labs/centauri/v4/x/ibc-hooks/types" + + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + + abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} +) + +// AppModuleBasic defines the basic application module used by the ibc-hooks module. +type AppModuleBasic struct{} + +var _ module.AppModuleBasic = AppModuleBasic{} + +// Name returns the ibc-hooks module's name. +func (AppModuleBasic) Name() string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec registers the ibc-hooks module's types on the given LegacyAmino codec. +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {} + +// RegisterInterfaces registers the module's interface types. +func (b AppModuleBasic) RegisterInterfaces(_ cdctypes.InterfaceRegistry) {} + +// DefaultGenesis returns default genesis state as raw bytes for the +// module. 
+func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + emptyString := "{}" + return []byte(emptyString) +} + +// ValidateGenesis performs genesis state validation for the ibc-hooks module. +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, config client.TxEncodingConfig, bz json.RawMessage) error { + return nil +} + +// RegisterRESTRoutes registers the REST routes for the ibc-hooks module. +func (AppModuleBasic) RegisterRESTRoutes(clientCtx client.Context, rtr *mux.Router) {} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the ibc-hooks module. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) {} + +// GetTxCmd returns no root tx command for the ibc-hooks module. +func (AppModuleBasic) GetTxCmd() *cobra.Command { return nil } + +// GetQueryCmd returns the root query command for the ibc-hooks module. +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// ___________________________________________________________________________ + +// AppModule implements an application module for the ibc-hooks module. +type AppModule struct { + AppModuleBasic +} + +// NewAppModule creates a new AppModule object. +func NewAppModule() AppModule { + return AppModule{ + AppModuleBasic: AppModuleBasic{}, + } +} + +// Name returns the ibc-hooks module's name. +func (AppModule) Name() string { + return types.ModuleName +} + +// RegisterInvariants registers the ibc-hooks module invariants. +func (am AppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} + +// QuerierRoute returns the module's querier route name. +func (AppModule) QuerierRoute() string { + return "" +} + +// RegisterServices registers a gRPC query service to respond to the +// module-specific gRPC queries. +func (am AppModule) RegisterServices(cfg module.Configurator) { +} + +// InitGenesis performs genesis initialization for the ibc-hooks module. It returns +// no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} + +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + return json.RawMessage([]byte("{}")) +} + +// BeginBlock returns the begin blocker for the ibc-hooks module. +func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { +} + +// EndBlock returns the end blocker for the ibc-hooks module. It returns no validator +// updates. +func (AppModule) EndBlock(_ sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} + +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } diff --git a/x/ibc-hooks/relay_test.go b/x/ibc-hooks/relay_test.go new file mode 100644 index 000000000..996f3d2a0 --- /dev/null +++ b/x/ibc-hooks/relay_test.go @@ -0,0 +1,236 @@ +package ibc_hooks_test + +import ( + "fmt" + "testing" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + customibctesting "github.com/notional-labs/centauri/v4/app/ibctesting" + ibchookskeeper "github.com/notional-labs/centauri/v4/x/ibc-hooks/keeper" + "github.com/stretchr/testify/suite" +) + +// TODO: use testsuite here. 
+type IBCHooksTestSuite struct { + suite.Suite + + coordinator *customibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *customibctesting.TestChain + chainB *customibctesting.TestChain + chainC *customibctesting.TestChain +} + +func (suite *IBCHooksTestSuite) SetupTest() { + suite.coordinator = customibctesting.NewCoordinator(suite.T(), 4) + suite.chainA = suite.coordinator.GetChain(customibctesting.GetChainID(1)) + suite.chainB = suite.coordinator.GetChain(customibctesting.GetChainID(2)) + suite.chainC = suite.coordinator.GetChain(customibctesting.GetChainID(3)) +} + +func NewTransferPath(chainA, chainB *customibctesting.TestChain) *customibctesting.Path { + path := customibctesting.NewPath(chainA, chainB) + path.EndpointA.ChannelConfig.PortID = customibctesting.TransferPort + path.EndpointB.ChannelConfig.PortID = customibctesting.TransferPort + path.EndpointA.ChannelConfig.Version = transfertypes.Version + path.EndpointB.ChannelConfig.Version = transfertypes.Version + + return path +} + +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(IBCHooksTestSuite)) +} + +func (suite *IBCHooksTestSuite) TestRecvHooks() { + var ( + transferAmount = sdk.NewInt(1000000000) + timeoutHeight = clienttypes.NewHeight(1, 110) + // when transfer via sdk transfer from A (module) -> B (contract) + // nativeTokenSendOnChainA = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + ) + + suite.SetupTest() // reset + + path := NewTransferPath(suite.chainA, suite.chainB) + suite.coordinator.Setup(path) + + // store code + suite.chainB.StoreContractCode(&suite.Suite, "../../tests/ibc-hooks/bytecode/counter.wasm") + // instancetiate contract + addr := suite.chainB.InstantiateContract(&suite.Suite, `{"count": 0}`, 1) + suite.Require().NotEmpty(addr) + + memo := fmt.Sprintf(`{"wasm": {"contract": "%s", "msg": {"increment": {} } } }`, addr) + + msg := transfertypes.NewMsgTransfer( + path.EndpointA.ChannelConfig.PortID, + path.EndpointA.ChannelID, + sdk.NewCoin(sdk.DefaultBondDenom, transferAmount), + suite.chainA.SenderAccount.GetAddress().String(), + addr.String(), + timeoutHeight, + 0, + memo, + ) + _, err := suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, path.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPackets(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + senderLocalAcc, err := ibchookskeeper.DeriveIntermediateSender("channel-0", suite.chainA.SenderAccount.GetAddress().String(), "cosmos") + suite.Require().NoError(err) + + state := suite.chainB.QueryContract(&suite.Suite, addr, []byte(fmt.Sprintf(`{"get_count": {"addr": "%s"}}`, senderLocalAcc))) + suite.Require().Equal(`{"count":0}`, state) + + state = suite.chainB.QueryContract(&suite.Suite, addr, []byte(fmt.Sprintf(`{"get_total_funds": {"addr": "%s"}}`, senderLocalAcc))) + suite.Require().Equal(`{"total_funds":[{"denom":"ibc/C053D637CCA2A2BA030E2C5EE1B28A16F71CCB0E45E8BE52766DC1B241B77878","amount":"1000000000"}]}`, state) +} + +func (suite *IBCHooksTestSuite) TestAckHooks() { + var ( + transferAmount = sdk.NewInt(1000000000) + timeoutHeight = clienttypes.NewHeight(0, 110) + // when transfer via sdk transfer from A 
(module) -> B (contract) + // nativeTokenSendOnChainA = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + ) + + suite.SetupTest() // reset + + path := NewTransferPath(suite.chainA, suite.chainB) + suite.coordinator.Setup(path) + + // store code + suite.chainA.StoreContractCode(&suite.Suite, "../../tests/ibc-hooks/bytecode/counter.wasm") + // instancetiate contract + addr := suite.chainA.InstantiateContract(&suite.Suite, `{"count": 0}`, 1) + suite.Require().NotEmpty(addr) + + fmt.Println(addr.String()) + + // Generate swap instructions for the contract + callbackMemo := fmt.Sprintf(`{"ibc_callback":"%s"}`, addr) + + msg := transfertypes.NewMsgTransfer( + path.EndpointA.ChannelConfig.PortID, + path.EndpointA.ChannelID, + sdk.NewCoin(sdk.DefaultBondDenom, transferAmount), + suite.chainA.SenderAccount.GetAddress().String(), + addr.String(), + timeoutHeight, + 0, + callbackMemo, + ) + _, err := suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, path.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPackets(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + state := suite.chainA.QueryContract( + &suite.Suite, addr, + []byte(fmt.Sprintf(`{"get_count": {"addr": "%s"}}`, addr))) + suite.Require().Equal(`{"count":1}`, state) + + _, err = suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, path.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPackets(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + state = suite.chainA.QueryContract( + &suite.Suite, addr, + []byte(fmt.Sprintf(`{"get_count": {"addr": "%s"}}`, addr))) + suite.Require().Equal(`{"count":2}`, state) +} + +func (suite *IBCHooksTestSuite) TestTimeoutHooks() { + var ( + transferAmount = sdk.NewInt(1000000000) + timeoutHeight = clienttypes.NewHeight(0, 500) + // when transfer via sdk transfer from A (module) -> B (contract) + // nativeTokenSendOnChainA = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + ) + + suite.SetupTest() // reset + + path := NewTransferPath(suite.chainA, suite.chainB) + suite.coordinator.Setup(path) + + // store code + suite.chainA.StoreContractCode(&suite.Suite, "../../tests/ibc-hooks/bytecode/counter.wasm") + // instancetiate contract + addr := suite.chainA.InstantiateContract(&suite.Suite, `{"count": 0}`, 1) + suite.Require().NotEmpty(addr) + + fmt.Println(addr.String()) + + // Generate swap instructions for the contract + callbackMemo := fmt.Sprintf(`{"ibc_callback":"%s"}`, addr) + + msg := transfertypes.NewMsgTransfer( + path.EndpointA.ChannelConfig.PortID, + path.EndpointA.ChannelID, + sdk.NewCoin(sdk.DefaultBondDenom, transferAmount), + suite.chainA.SenderAccount.GetAddress().String(), + addr.String(), + timeoutHeight, + uint64(suite.coordinator.CurrentTime.Add(time.Minute).UnixNano()), + callbackMemo, + ) + sdkResult, err := 
suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) + + packet, err := customibctesting.ParsePacketFromEvents(sdkResult.GetEvents()) + suite.Require().NoError(err) + + // Move chainB forward one block + suite.chainB.NextBlock() + // One month later + suite.coordinator.IncrementTimeBy(time.Hour) + err = path.EndpointA.UpdateClient() + suite.Require().NoError(err) + + err = path.EndpointA.TimeoutPacket(packet) + suite.Require().NoError(err) + + // The test contract will increment the counter for itself by 10 when a packet times out + state := suite.chainA.QueryContract(&suite.Suite, addr, []byte(fmt.Sprintf(`{"get_count": {"addr": "%s"}}`, addr))) + suite.Require().Equal(`{"count":10}`, state) +} diff --git a/x/ibc-hooks/types/errors.go b/x/ibc-hooks/types/errors.go new file mode 100644 index 000000000..717e33bdd --- /dev/null +++ b/x/ibc-hooks/types/errors.go @@ -0,0 +1,17 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" +) + +var ( + ErrBadMetadataFormatMsg = "wasm metadata not properly formatted for: '%v'. %s" + ErrBadExecutionMsg = "cannot execute contract: %v" + + ErrMsgValidation = errorsmod.Register("wasm-hooks", 2, "error in wasmhook message validation") + ErrMarshaling = errorsmod.Register("wasm-hooks", 3, "cannot marshal the ICS20 packet") + ErrInvalidPacket = errorsmod.Register("wasm-hooks", 4, "invalid packet data") + ErrBadResponse = errorsmod.Register("wasm-hooks", 5, "cannot create response") + ErrWasmError = errorsmod.Register("wasm-hooks", 6, "wasm error") + ErrBadSender = errorsmod.Register("wasm-hooks", 7, "bad sender") +) diff --git a/x/ibc-hooks/types/keys.go b/x/ibc-hooks/types/keys.go new file mode 100644 index 000000000..9a3e7ded1 --- /dev/null +++ b/x/ibc-hooks/types/keys.go @@ -0,0 +1,8 @@ +package types + +const ( + ModuleName = "ibchooks" + StoreKey = "hooks-for-ibc" // not using the module name because of collisions with key "ibc" + IBCCallbackKey = "ibc_callback" + SenderPrefix = "ibc-wasm-hook-intermediary" +) diff --git a/x/ibc-hooks/utils.go b/x/ibc-hooks/utils.go new file mode 100644 index 000000000..e8c084e60 --- /dev/null +++ b/x/ibc-hooks/utils.go @@ -0,0 +1,77 @@ +package ibc_hooks + +import ( + "encoding/json" + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported" +) + +const IbcAcknowledgementErrorType = "ibc-acknowledgement-error" + +// NewEmitErrorAcknowledgement creates a new error acknowledgement after having emitted an event with the +// details of the error. +func NewEmitErrorAcknowledgement(ctx sdk.Context, err error, errorContexts ...string) channeltypes.Acknowledgement { + logger := ctx.Logger().With("module", IbcAcknowledgementErrorType) + + attributes := make([]sdk.Attribute, len(errorContexts)+1) + attributes[0] = sdk.NewAttribute("error", err.Error()) + for i, s := range errorContexts { + attributes[i+1] = sdk.NewAttribute("error-context", s) + logger.Error(fmt.Sprintf("error-context: %v", s)) + } + + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + IbcAcknowledgementErrorType, + attributes..., + ), + }) + + return channeltypes.NewErrorAcknowledgement(err) +} + +// MustExtractDenomFromPacketOnRecv takes a packet with a valid ICS20 token data in the Data field and returns the +// denom as represented in the local chain. 
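+// When the receiver chain is the source of the token, the sender's port/channel
+// prefix is stripped (and hashed back into an ibc/ denom if a trace path remains);
+// otherwise the destination port/channel prefix is prepended and hashed into the
+// local ibc/ denom.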
+// If the data cannot be unmarshalled this function will panic +func MustExtractDenomFromPacketOnRecv(packet ibcexported.PacketI) string { + var data transfertypes.FungibleTokenPacketData + if err := json.Unmarshal(packet.GetData(), &data); err != nil { + panic("unable to unmarshal ICS20 packet data") + } + + var denom string + if transfertypes.ReceiverChainIsSource(packet.GetSourcePort(), packet.GetSourceChannel(), data.Denom) { + // remove prefix added by sender chain + voucherPrefix := transfertypes.GetDenomPrefix(packet.GetSourcePort(), packet.GetSourceChannel()) + + unprefixedDenom := data.Denom[len(voucherPrefix):] + + // coin denomination used in sending from the escrow address + denom = unprefixedDenom + + // The denomination used to send the coins is either the native denom or the hash of the path + // if the denomination is not native. + denomTrace := transfertypes.ParseDenomTrace(unprefixedDenom) + if denomTrace.Path != "" { + denom = denomTrace.IBCDenom() + } + } else { + prefixedDenom := transfertypes.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel()) + data.Denom + denom = transfertypes.ParseDenomTrace(prefixedDenom).IBCDenom() + } + return denom +} + +// IsAckError checks an IBC acknowledgement to see if it's an error. +// This is a replacement for ack.Success() which is currently not working on some circumstances +func IsAckError(acknowledgement []byte) bool { + var ackErr channeltypes.Acknowledgement_Error + if err := json.Unmarshal(acknowledgement, &ackErr); err == nil && len(ackErr.Error) > 0 { + return true + } + return false +} diff --git a/x/ibc-hooks/wasm_hook.go b/x/ibc-hooks/wasm_hook.go new file mode 100644 index 000000000..a475fb5de --- /dev/null +++ b/x/ibc-hooks/wasm_hook.go @@ -0,0 +1,375 @@ +package ibc_hooks + +import ( + "encoding/json" + "fmt" + + wasmkeeper "github.com/CosmWasm/wasmd/x/wasm/keeper" + wasmtypes "github.com/CosmWasm/wasmd/x/wasm/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported" + "github.com/notional-labs/centauri/v4/x/ibc-hooks/keeper" + "github.com/notional-labs/centauri/v4/x/ibc-hooks/types" + + errorsmod "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" +) + +type ContractAck struct { + ContractResult []byte `json:"contract_result"` + IbcAck []byte `json:"ibc_ack"` +} + +type WasmHooks struct { + ContractKeeper *wasmkeeper.Keeper + ibcHooksKeeper *keeper.Keeper + bech32PrefixAccAddr string +} + +func NewWasmHooks(ibcHooksKeeper *keeper.Keeper, contractKeeper *wasmkeeper.Keeper, bech32PrefixAccAddr string) WasmHooks { + return WasmHooks{ + ContractKeeper: contractKeeper, + ibcHooksKeeper: ibcHooksKeeper, + bech32PrefixAccAddr: bech32PrefixAccAddr, + } +} + +func (h WasmHooks) ProperlyConfigured() bool { + return h.ContractKeeper != nil && h.ibcHooksKeeper != nil +} + +func (h WasmHooks) OnRecvPacketOverride(im IBCMiddleware, ctx sdk.Context, packet channeltypes.Packet, relayer sdk.AccAddress) ibcexported.Acknowledgement { + if !h.ProperlyConfigured() { + // Not configured + return im.App.OnRecvPacket(ctx, packet, relayer) + } + isIcs20, data := isIcs20Packet(packet.GetData()) + if !isIcs20 { + return im.App.OnRecvPacket(ctx, packet, relayer) + } + + // Validate the memo + isWasmRouted, contractAddr, 
msgBytes, err := ValidateAndParseMemo(data.GetMemo(), data.Receiver) + if !isWasmRouted { + return im.App.OnRecvPacket(ctx, packet, relayer) + } + if err != nil { + return NewEmitErrorAcknowledgement(ctx, types.ErrMsgValidation, err.Error()) + } + if msgBytes == nil || contractAddr == nil { // This should never happen + return NewEmitErrorAcknowledgement(ctx, types.ErrMsgValidation) + } + + // Calculate the receiver / contract caller based on the packet's channel and sender + channel := packet.GetDestChannel() + sender := data.GetSender() + senderBech32, err := keeper.DeriveIntermediateSender(channel, sender, h.bech32PrefixAccAddr) + if err != nil { + return NewEmitErrorAcknowledgement(ctx, types.ErrBadSender, fmt.Sprintf("cannot convert sender address %s/%s to bech32: %s", channel, sender, err.Error())) + } + + // The funds sent on this packet need to be transferred to the intermediary account for the sender. + // For this, we override the ICS20 packet's Receiver (essentially hijacking the funds to this new address) + // and execute the underlying OnRecvPacket() call (which should eventually land on the transfer app's + // relay.go and send the sunds to the intermediary account. + // + // If that succeeds, we make the contract call + data.Receiver = senderBech32 + bz, err := json.Marshal(data) + if err != nil { + return NewEmitErrorAcknowledgement(ctx, types.ErrMarshaling, err.Error()) + } + packet.Data = bz + + // Execute the receive + ack := im.App.OnRecvPacket(ctx, packet, relayer) + if !ack.Success() { + return ack + } + + amount, ok := sdk.NewIntFromString(data.GetAmount()) + if !ok { + // This should never happen, as it should've been caught in the underlaying call to OnRecvPacket, + // but returning here for completeness + return NewEmitErrorAcknowledgement(ctx, types.ErrInvalidPacket, "Amount is not an int") + } + + // The packet's denom is the denom in the sender chain. This needs to be converted to the local denom. + denom := MustExtractDenomFromPacketOnRecv(packet) + funds := sdk.NewCoins(sdk.NewCoin(denom, amount)) + + // Execute the contract + execMsg := wasmtypes.MsgExecuteContract{ + Sender: senderBech32, + Contract: contractAddr.String(), + Msg: msgBytes, + Funds: funds, + } + response, err := h.execWasmMsg(ctx, &execMsg) + if err != nil { + return NewEmitErrorAcknowledgement(ctx, types.ErrWasmError, err.Error()) + } + + fullAck := ContractAck{ContractResult: response.Data, IbcAck: ack.Acknowledgement()} + bz, err = json.Marshal(fullAck) + if err != nil { + return NewEmitErrorAcknowledgement(ctx, types.ErrBadResponse, err.Error()) + } + return channeltypes.NewResultAcknowledgement(bz) +} + +func (h WasmHooks) SendPacketOverride( + i ICS4Middleware, + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + sourcePort string, + sourceChannel string, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, + data []byte, +) (sequence uint64, err error) { + isIcs20, Ics20PacketData := isIcs20Packet(data) + if !isIcs20 { + return i.channel.SendPacket(ctx, chanCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) // continue + } + + isCallbackRouted, metadata := jsonStringHasKey(Ics20PacketData.GetMemo(), types.IBCCallbackKey) + if !isCallbackRouted { + return i.channel.SendPacket(ctx, chanCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, data) // continue + } + + // We remove the callback metadata from the memo as it has already been processed. 
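+	// For example (illustrative keys), a memo of {"ibc_callback": "<contract-addr>", "forward": {...}}
+	// is rewritten to {"forward": {...}} before the packet data is re-marshalled.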
+ + // If the only available key in the memo is the callback, we should remove the memo + // from the data completely so the packet is sent without it. + // This way receiver chains that are on old versions of IBC will be able to process the packet + + callbackRaw := metadata[types.IBCCallbackKey] // This will be used later. + delete(metadata, types.IBCCallbackKey) + bzMetadata, err := json.Marshal(metadata) + if err != nil { + return 0, errorsmod.Wrap(err, "Send packet with callback error") + } + stringMetadata := string(bzMetadata) + if stringMetadata == "{}" { + Ics20PacketData.Memo = "" + } else { + Ics20PacketData.Memo = stringMetadata + } + dataBytes, err := json.Marshal(Ics20PacketData) + if err != nil { + return 0, errorsmod.Wrap(err, "Send packet with callback error") + } + + seq, err := i.channel.SendPacket(ctx, chanCap, sourcePort, sourceChannel, timeoutHeight, timeoutTimestamp, dataBytes) + if err != nil { + return seq, err + } + + // Make sure the callback contract is a string and a valid bech32 addr. If it isn't, ignore this packet + contract, ok := callbackRaw.(string) + if !ok { + return seq, nil + } + _, err = sdk.AccAddressFromBech32(contract) + if err != nil { + return seq, nil + } + h.ibcHooksKeeper.StorePacketCallback(ctx, sourceChannel, seq, contract) + return seq, nil +} + +func (h WasmHooks) OnAcknowledgementPacketOverride(im IBCMiddleware, ctx sdk.Context, packet channeltypes.Packet, acknowledgement []byte, relayer sdk.AccAddress) error { + err := im.App.OnAcknowledgementPacket(ctx, packet, acknowledgement, relayer) + if err != nil { + return err + } + + if !h.ProperlyConfigured() { + // Not configured. Return from the underlying implementation + return nil + } + + contract := h.ibcHooksKeeper.GetPacketCallback(ctx, packet.GetSourceChannel(), packet.GetSequence()) + if contract == "" { + // No callback configured + return nil + } + + contractAddr, err := sdk.AccAddressFromBech32(contract) + if err != nil { + return errorsmod.Wrap(err, "Ack callback error") // The callback configured is not a bech32. Error out + } + + success := "false" + if !IsAckError(acknowledgement) { + success = "true" + } + + // Notify the sender that the ack has been received + ackAsJson, err := json.Marshal(acknowledgement) + if err != nil { + // If the ack is not a json object, error + return err + } + + sudoMsg := []byte(fmt.Sprintf( + `{"ibc_lifecycle_complete": {"ibc_ack": {"channel": "%s", "sequence": %d, "ack": %s, "success": %s}}}`, + packet.SourceChannel, packet.Sequence, ackAsJson, success)) + _, err = h.ContractKeeper.Sudo(ctx, contractAddr, sudoMsg) + if err != nil { + // error processing the callback + // ToDo: Open Question: Should we also delete the callback here? + return errorsmod.Wrap(err, "Ack callback error") + } + + h.ibcHooksKeeper.DeletePacketCallback(ctx, packet.GetSourceChannel(), packet.GetSequence()) + return nil +} + +func (h WasmHooks) OnTimeoutPacketOverride(im IBCMiddleware, ctx sdk.Context, packet channeltypes.Packet, relayer sdk.AccAddress) error { + err := im.App.OnTimeoutPacket(ctx, packet, relayer) + if err != nil { + return err + } + + if !h.ProperlyConfigured() { + // Not configured. 
Return from the underlying implementation + return nil + } + + contract := h.ibcHooksKeeper.GetPacketCallback(ctx, packet.GetSourceChannel(), packet.GetSequence()) + if contract == "" { + // No callback configured + return nil + } + + contractAddr, err := sdk.AccAddressFromBech32(contract) + if err != nil { + return errorsmod.Wrap(err, "Timeout callback error") // The callback configured is not a bech32. Error out + } + + sudoMsg := []byte(fmt.Sprintf( + `{"ibc_lifecycle_complete": {"ibc_timeout": {"channel": "%s", "sequence": %d}}}`, + packet.SourceChannel, packet.Sequence)) + _, err = h.ContractKeeper.Sudo(ctx, contractAddr, sudoMsg) + if err != nil { + // error processing the callback. This could be because the contract doesn't implement the message type to + // process the callback. Retrying this will not help, so we can delete the callback from storage. + // Since the packet has timed out, we don't expect any other responses that may trigger the callback. + ctx.EventManager().EmitEvents(sdk.Events{ + sdk.NewEvent( + "ibc-timeout-callback-error", + sdk.NewAttribute("contract", contractAddr.String()), + sdk.NewAttribute("message", string(sudoMsg)), + sdk.NewAttribute("error", err.Error()), + ), + }) + } + h.ibcHooksKeeper.DeletePacketCallback(ctx, packet.GetSourceChannel(), packet.GetSequence()) + return nil +} + +func ValidateAndParseMemo(memo string, receiver string) (isWasmRouted bool, contractAddr sdk.AccAddress, msgBytes []byte, err error) { + isWasmRouted, metadata := jsonStringHasKey(memo, "wasm") + if !isWasmRouted { + return isWasmRouted, sdk.AccAddress{}, nil, nil + } + + wasmRaw := metadata["wasm"] + + // Make sure the wasm key is a map. If it isn't, ignore this packet + wasm, ok := wasmRaw.(map[string]interface{}) + if !ok { + return isWasmRouted, sdk.AccAddress{}, nil, + fmt.Errorf(types.ErrBadMetadataFormatMsg, memo, "wasm metadata is not a valid JSON map object") + } + + // Get the contract + contract, ok := wasm["contract"].(string) + if !ok { + // The tokens will be returned + return isWasmRouted, sdk.AccAddress{}, nil, + fmt.Errorf(types.ErrBadMetadataFormatMsg, memo, `Could not find key wasm["contract"]`) + } + + contractAddr, err = sdk.AccAddressFromBech32(contract) + if err != nil { + return isWasmRouted, sdk.AccAddress{}, nil, + fmt.Errorf(types.ErrBadMetadataFormatMsg, memo, `wasm["contract"] is not a valid bech32 address`) + } + + // The contract and the receiver should be the same for the packet to be valid + if contract != receiver { + return isWasmRouted, sdk.AccAddress{}, nil, + fmt.Errorf(types.ErrBadMetadataFormatMsg, memo, `wasm["contract"] should be the same as the receiver of the packet`) + } + + // Ensure the message key is provided + if wasm["msg"] == nil { + return isWasmRouted, sdk.AccAddress{}, nil, + fmt.Errorf(types.ErrBadMetadataFormatMsg, memo, `Could not find key wasm["msg"]`) + } + + // Make sure the msg key is a map. 
If it isn't, return an error + _, ok = wasm["msg"].(map[string]interface{}) + if !ok { + return isWasmRouted, sdk.AccAddress{}, nil, + fmt.Errorf(types.ErrBadMetadataFormatMsg, memo, `wasm["msg"] is not a map object`) + } + + // Get the message string by serializing the map + msgBytes, err = json.Marshal(wasm["msg"]) + if err != nil { + // The tokens will be returned + return isWasmRouted, sdk.AccAddress{}, nil, + fmt.Errorf(types.ErrBadMetadataFormatMsg, memo, err.Error()) + } + + return isWasmRouted, contractAddr, msgBytes, nil +} + +func isIcs20Packet(data []byte) (isIcs20 bool, ics20data transfertypes.FungibleTokenPacketData) { + var packetData transfertypes.FungibleTokenPacketData + if err := json.Unmarshal(data, &packetData); err != nil { + return false, packetData + } + return true, packetData +} + +// jsonStringHasKey parses the memo as a json object and checks if it contains the key. +func jsonStringHasKey(memo, key string) (found bool, jsonObject map[string]interface{}) { + jsonObject = make(map[string]interface{}) + + // If there is no memo, the packet was either sent with an earlier version of IBC, or the memo was + // intentionally left blank. Nothing to do here. Ignore the packet and pass it down the stack. + if len(memo) == 0 { + return false, jsonObject + } + + // the jsonObject must be a valid JSON object + err := json.Unmarshal([]byte(memo), &jsonObject) + if err != nil { + return false, jsonObject + } + + // If the key doesn't exist, there's nothing to do on this hook. Continue by passing the packet + // down the stack + _, ok := jsonObject[key] + if !ok { + return false, jsonObject + } + + return true, jsonObject +} + +func (h WasmHooks) execWasmMsg(ctx sdk.Context, execMsg *wasmtypes.MsgExecuteContract) (*wasmtypes.MsgExecuteContractResponse, error) { + if err := execMsg.ValidateBasic(); err != nil { + return nil, fmt.Errorf(types.ErrBadExecutionMsg, err.Error()) + } + wasmMsgServer := wasmkeeper.NewMsgServerImpl(h.ContractKeeper) + return wasmMsgServer.ExecuteContract(sdk.WrapSDKContext(ctx), execMsg) +} diff --git a/x/mint/abci.go b/x/mint/abci.go index c2439f140..c48d68482 100644 --- a/x/mint/abci.go +++ b/x/mint/abci.go @@ -5,8 +5,8 @@ import ( "github.com/cosmos/cosmos-sdk/telemetry" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/notional-labs/centauri/v3/x/mint/keeper" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/keeper" + "github.com/notional-labs/centauri/v4/x/mint/types" ) // BeginBlocker mints new tokens for the previous block. diff --git a/x/mint/client/cli/query.go b/x/mint/client/cli/query.go index 31d809745..a9713bc5b 100644 --- a/x/mint/client/cli/query.go +++ b/x/mint/client/cli/query.go @@ -7,7 +7,7 @@ import ( "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/types" ) // GetQueryCmd returns the cli query commands for the minting module. 
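For reference, a minimal sketch of the memo shape that ValidateAndParseMemo in the new x/ibc-hooks/wasm_hook.go above accepts: the ICS20 memo must carry a top-level "wasm" object whose "contract" equals the packet receiver and whose "msg" is a JSON map. The contract address and execute message below are made-up placeholders for illustration, not values taken from this changeset.

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Hypothetical receiver/contract and execute msg, for illustration only.
	memo := map[string]interface{}{
		"wasm": map[string]interface{}{
			"contract": "centauri1examplecontractaddr", // must match the ICS20 packet's receiver
			"msg":      map[string]interface{}{"do_something": map[string]interface{}{}},
		},
	}
	bz, err := json.Marshal(memo)
	if err != nil {
		panic(err)
	}
	// Prints: {"wasm":{"contract":"centauri1examplecontractaddr","msg":{"do_something":{}}}}
	fmt.Println(string(bz))
}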
diff --git a/x/mint/client/cli/tx.go b/x/mint/client/cli/tx.go index 730b5694c..435980406 100644 --- a/x/mint/client/cli/tx.go +++ b/x/mint/client/cli/tx.go @@ -5,7 +5,7 @@ import ( "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/tx" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/types" "github.com/spf13/cobra" ) diff --git a/x/mint/keeper/genesis.go b/x/mint/keeper/genesis.go index 65ba199c2..9c923c992 100644 --- a/x/mint/keeper/genesis.go +++ b/x/mint/keeper/genesis.go @@ -2,7 +2,7 @@ package keeper import ( sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/types" ) // InitGenesis new mint genesis diff --git a/x/mint/keeper/grpc_query.go b/x/mint/keeper/grpc_query.go index b46e00700..9602beab9 100644 --- a/x/mint/keeper/grpc_query.go +++ b/x/mint/keeper/grpc_query.go @@ -4,7 +4,7 @@ import ( "context" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/types" ) var _ types.QueryServer = Keeper{} diff --git a/x/mint/keeper/keeper.go b/x/mint/keeper/keeper.go index 9e8efa15f..eecd683d7 100644 --- a/x/mint/keeper/keeper.go +++ b/x/mint/keeper/keeper.go @@ -9,7 +9,7 @@ import ( "github.com/cosmos/cosmos-sdk/codec" storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/types" ) // Keeper of the mint store diff --git a/x/mint/keeper/msg_server.go b/x/mint/keeper/msg_server.go index 508cdf3f1..3a14411be 100644 --- a/x/mint/keeper/msg_server.go +++ b/x/mint/keeper/msg_server.go @@ -7,7 +7,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/types" ) var _ types.MsgServer = msgServer{} diff --git a/x/mint/module.go b/x/mint/module.go index 83187e98b..1a154dc78 100644 --- a/x/mint/module.go +++ b/x/mint/module.go @@ -15,10 +15,10 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/notional-labs/centauri/v3/x/mint/client/cli" - "github.com/notional-labs/centauri/v3/x/mint/keeper" - "github.com/notional-labs/centauri/v3/x/mint/simulation" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/client/cli" + "github.com/notional-labs/centauri/v4/x/mint/keeper" + "github.com/notional-labs/centauri/v4/x/mint/simulation" + "github.com/notional-labs/centauri/v4/x/mint/types" ) var ( diff --git a/x/mint/simulation/decoder.go b/x/mint/simulation/decoder.go index cdccb95a9..5853e8151 100644 --- a/x/mint/simulation/decoder.go +++ b/x/mint/simulation/decoder.go @@ -6,7 +6,7 @@ import ( "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/types/kv" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/types" ) // NewDecodeStore returns a decoder function closure that unmarshals the KVPair's diff --git a/x/mint/simulation/decoder_test.go b/x/mint/simulation/decoder_test.go index 3eebc2c29..1a05a4862 100644 --- a/x/mint/simulation/decoder_test.go +++ b/x/mint/simulation/decoder_test.go @@ -9,8 +9,8 
@@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/kv" "github.com/cosmos/cosmos-sdk/types/module/testutil" - "github.com/notional-labs/centauri/v3/x/mint/simulation" - centauriminttypes "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/simulation" + centauriminttypes "github.com/notional-labs/centauri/v4/x/mint/types" ) func TestDecodeStore(t *testing.T) { diff --git a/x/mint/simulation/genesis.go b/x/mint/simulation/genesis.go index bbc6529e6..6c5d9d1a5 100644 --- a/x/mint/simulation/genesis.go +++ b/x/mint/simulation/genesis.go @@ -8,7 +8,7 @@ import ( "github.com/cosmos/cosmos-sdk/types/module" simtypes "github.com/cosmos/cosmos-sdk/types/simulation" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/types" ) // Simulation parameter constants diff --git a/x/mint/simulation/genesis_test.go b/x/mint/simulation/genesis_test.go index 87ef5335f..3f8ed9974 100644 --- a/x/mint/simulation/genesis_test.go +++ b/x/mint/simulation/genesis_test.go @@ -13,8 +13,8 @@ import ( moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" simtypes "github.com/cosmos/cosmos-sdk/types/simulation" "github.com/cosmos/cosmos-sdk/x/mint" - "github.com/notional-labs/centauri/v3/x/mint/simulation" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/simulation" + "github.com/notional-labs/centauri/v4/x/mint/types" ) // TestRandomizedGenState tests the normal scenario of applying RandomizedGenState. diff --git a/x/mint/simulation/proposals.go b/x/mint/simulation/proposals.go index 309fd38d9..cf2aaa720 100644 --- a/x/mint/simulation/proposals.go +++ b/x/mint/simulation/proposals.go @@ -7,7 +7,7 @@ import ( "github.com/cosmos/cosmos-sdk/types/address" simtypes "github.com/cosmos/cosmos-sdk/types/simulation" "github.com/cosmos/cosmos-sdk/x/simulation" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/types" ) // Simulation operation weights constants diff --git a/x/mint/simulation/proposals_test.go b/x/mint/simulation/proposals_test.go index cf0528974..14ae52098 100644 --- a/x/mint/simulation/proposals_test.go +++ b/x/mint/simulation/proposals_test.go @@ -10,8 +10,8 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/address" simtypes "github.com/cosmos/cosmos-sdk/types/simulation" - "github.com/notional-labs/centauri/v3/x/mint/simulation" - "github.com/notional-labs/centauri/v3/x/mint/types" + "github.com/notional-labs/centauri/v4/x/mint/simulation" + "github.com/notional-labs/centauri/v4/x/mint/types" ) func TestProposalMsgs(t *testing.T) { diff --git a/x/mint/types/tx.pb.go b/x/mint/types/tx.pb.go index ba74708ab..ac75494ea 100644 --- a/x/mint/types/tx.pb.go +++ b/x/mint/types/tx.pb.go @@ -37,7 +37,8 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // // Since: cosmos-sdk 0.47 type MsgUpdateParams struct { - // authority is the address that controls the module (defaults to x/gov unless overwritten). + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty"` // params defines the x/mint parameters to update. 
// @@ -222,7 +223,8 @@ var xxx_messageInfo_MsgFundModuleAccountResponse proto.InternalMessageInfo // MsgAddAccountToFundModuleSet add account in to allowed fund module set type MsgAddAccountToFundModuleSet struct { - // authority is the address that controls the module (defaults to x/gov unless overwritten). + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty" yaml:"authority"` AllowedAddress string `protobuf:"bytes,2,opt,name=allowed_address,json=allowedAddress,proto3" json:"allowed_address,omitempty" yaml:"allowed_address"` } diff --git a/x/ratelimit/client/cli/query.go b/x/ratelimit/client/cli/query.go new file mode 100644 index 000000000..6b0ff3087 --- /dev/null +++ b/x/ratelimit/client/cli/query.go @@ -0,0 +1,26 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + + "github.com/notional-labs/centauri/v4/x/ratelimit/types" +) + +// GetQueryCmd returns the cli query commands for this module. +func GetQueryCmd() *cobra.Command { + // Group ratelimit queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand() + return cmd +} diff --git a/x/ratelimit/client/cli/tx.go b/x/ratelimit/client/cli/tx.go new file mode 100644 index 000000000..38a11e5ba --- /dev/null +++ b/x/ratelimit/client/cli/tx.go @@ -0,0 +1,22 @@ +package cli + +import ( + "fmt" + + "github.com/notional-labs/centauri/v4/x/ratelimit/types" + "github.com/spf13/cobra" +) + +// GetTxCmd returns the tx commands for the ratelimit module +func GetTxCmd() *cobra.Command { + txCmd := &cobra.Command{ + Use: types.ModuleName, + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + Short: fmt.Sprintf("Tx commands for the %s module", types.ModuleName), + } + + txCmd.AddCommand() + + return txCmd +} diff --git a/x/ratelimit/ibc_middleware.go b/x/ratelimit/ibc_middleware.go new file mode 100644 index 000000000..1d2513936 --- /dev/null +++ b/x/ratelimit/ibc_middleware.go @@ -0,0 +1,184 @@ +package ratelimit + +import ( + "fmt" + + "github.com/notional-labs/centauri/v4/x/ratelimit/keeper" + + sdk "github.com/cosmos/cosmos-sdk/types" + + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types" + "github.com/cosmos/ibc-go/v7/modules/core/exported" +) + +var _ porttypes.Middleware = &IBCMiddleware{} + +type IBCMiddleware struct { + app porttypes.IBCModule + keeper keeper.Keeper +} + +func NewIBCMiddleware(k keeper.Keeper, app porttypes.IBCModule) IBCMiddleware { + return IBCMiddleware{ + app: app, + keeper: k, + } +} + +// OnChanOpenInit implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenInit(ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID string, + channelID string, + channelCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + version string, +) (string, error) { + return im.app.OnChanOpenInit( + ctx, + order, + connectionHops, + portID, + channelID, + channelCap, + counterparty, + version, + ) +} + +// OnChanOpenTry implements the IBCMiddleware
interface +func (im IBCMiddleware) OnChanOpenTry( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID, + channelID string, + channelCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + counterpartyVersion string, +) (string, error) { + return im.app.OnChanOpenTry(ctx, order, connectionHops, portID, channelID, channelCap, counterparty, counterpartyVersion) +} + +// OnChanOpenAck implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenAck( + ctx sdk.Context, + portID, + channelID string, + counterpartyChannelID string, + counterpartyVersion string, +) error { + return im.app.OnChanOpenAck(ctx, portID, channelID, counterpartyChannelID, counterpartyVersion) +} + +// OnChanOpenConfirm implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenConfirm( + ctx sdk.Context, + portID, + channelID string, +) error { + return im.app.OnChanOpenConfirm(ctx, portID, channelID) +} + +// OnChanCloseInit implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanCloseInit( + ctx sdk.Context, + portID, + channelID string, +) error { + return im.app.OnChanCloseInit(ctx, portID, channelID) +} + +// OnChanCloseConfirm implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanCloseConfirm( + ctx sdk.Context, + portID, + channelID string, +) error { + return im.app.OnChanCloseConfirm(ctx, portID, channelID) +} + +// OnRecvPacket implements the IBCMiddleware interface +func (im IBCMiddleware) OnRecvPacket( + ctx sdk.Context, + packet channeltypes.Packet, + relayer sdk.AccAddress, +) exported.Acknowledgement { + // Check if the packet would cause the rate limit to be exceeded, + // and if so, return an ack error + if err := im.keeper.ReceiveRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error(fmt.Sprintf("ICS20 packet receive was denied: %s", err.Error())) + return channeltypes.NewErrorAcknowledgement(err) + } + + // If the packet was not rate-limited, pass it down to the Transfer OnRecvPacket callback + return im.app.OnRecvPacket(ctx, packet, relayer) +} + +// OnAcknowledgementPacket implements the IBCMiddleware interface +func (im IBCMiddleware) OnAcknowledgementPacket( + ctx sdk.Context, + packet channeltypes.Packet, + acknowledgement []byte, + relayer sdk.AccAddress, +) error { + if err := im.keeper.AcknowledgeRateLimitedPacket(ctx, packet, acknowledgement); err != nil { + im.keeper.Logger(ctx).Error(fmt.Sprintf("ICS20 RateLimited OnAckPacket failed: %s", err.Error())) + return err + } + return im.app.OnAcknowledgementPacket(ctx, packet, acknowledgement, relayer) +} + +// OnTimeoutPacket implements the IBCMiddleware interface +func (im IBCMiddleware) OnTimeoutPacket( + ctx sdk.Context, + packet channeltypes.Packet, + relayer sdk.AccAddress, +) error { + if err := im.keeper.TimeoutRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error(fmt.Sprintf("ICS20 RateLimited OnTimeoutPacket failed: %s", err.Error())) + return err + } + return im.app.OnTimeoutPacket(ctx, packet, relayer) +} + +// SendPacket implements the ICS4 Wrapper interface +// Rate-limited SendPacket found in RateLimit Keeper +func (im IBCMiddleware) SendPacket( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + sourcePort string, + sourceChannel string, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, + data []byte, +) (sequence uint64, err error) { + return im.keeper.SendPacket( + ctx, + chanCap, + sourcePort, + sourceChannel, + timeoutHeight, + timeoutTimestamp, + data, + ) 
+} + +// WriteAcknowledgement implements the ICS4 Wrapper interface +func (im IBCMiddleware) WriteAcknowledgement( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + packet exported.PacketI, + ack exported.Acknowledgement, +) error { + return im.keeper.WriteAcknowledgement(ctx, chanCap, packet, ack) +} + +// GetAppVersion returns the application version of the underlying application +func (i IBCMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + return i.keeper.GetAppVersion(ctx, portID, channelID) +} diff --git a/x/ratelimit/keeper/abci.go b/x/ratelimit/keeper/abci.go new file mode 100644 index 000000000..8e3aace1b --- /dev/null +++ b/x/ratelimit/keeper/abci.go @@ -0,0 +1,57 @@ +package keeper + +import ( + "fmt" + "time" + + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/notional-labs/centauri/v4/x/ratelimit/types" +) + +// BeginBlocker of epochs module. +func (k Keeper) BeginBlocker(ctx sdk.Context) { + defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker) + k.IterateEpochInfo(ctx, func(index int64, epochInfo types.EpochInfo) (stop bool) { + logger := k.Logger(ctx) + + // If blocktime < initial epoch start time, return + if ctx.BlockTime().Before(epochInfo.StartTime) { + return + } + // if epoch counting hasn't started, signal we need to start. + shouldInitialEpochStart := !epochInfo.EpochCountingStarted + + epochEndTime := epochInfo.CurrentEpochStartTime.Add(epochInfo.Duration) + shouldEpochStart := (ctx.BlockTime().After(epochEndTime)) || shouldInitialEpochStart + + if !shouldEpochStart { + return false + } + epochInfo.CurrentEpochStartHeight = ctx.BlockHeight() + + if shouldInitialEpochStart { + epochInfo.EpochCountingStarted = true + epochInfo.CurrentEpoch = 1 + epochInfo.CurrentEpochStartTime = epochInfo.StartTime + logger.Info(fmt.Sprintf("Starting new epoch with identifier %s epoch number %d", epochInfo.Identifier, epochInfo.CurrentEpoch)) + } else { + k.AfterEpochEnd(ctx, epochInfo) + epochInfo.CurrentEpoch += 1 + epochInfo.CurrentEpochStartTime = epochInfo.CurrentEpochStartTime.Add(epochInfo.Duration) + logger.Info(fmt.Sprintf("Starting epoch with identifier %s epoch number %d", epochInfo.Identifier, epochInfo.CurrentEpoch)) + } + + // emit new epoch start event, set epoch info, and run BeforeEpochStart hook + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeEpochStart, + sdk.NewAttribute(types.AttributeEpochNumber, fmt.Sprintf("%d", epochInfo.CurrentEpoch)), + sdk.NewAttribute(types.AttributeEpochStartTime, fmt.Sprintf("%d", epochInfo.CurrentEpochStartTime.Unix())), + ), + ) + k.setEpochInfo(ctx, epochInfo) + + return false + }) +} diff --git a/x/ratelimit/keeper/epoch.go b/x/ratelimit/keeper/epoch.go new file mode 100644 index 000000000..7b788668f --- /dev/null +++ b/x/ratelimit/keeper/epoch.go @@ -0,0 +1,132 @@ +package keeper + +import ( + "fmt" + "time" + + "github.com/gogo/protobuf/proto" + + "github.com/notional-labs/centauri/v4/x/ratelimit/types" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// HasEpochInfo return true if has epoch info +func (k Keeper) HasEpochInfo(ctx sdk.Context, identifier string) bool { + store := ctx.KVStore(k.storeKey) + return store.Has(append(types.EpochKeyPrefix, []byte(identifier)...)) +} + +// GetEpochInfo returns epoch info by identifier. 
+func (k Keeper) GetEpochInfo(ctx sdk.Context, identifier string) types.EpochInfo { + epoch := types.EpochInfo{} + store := ctx.KVStore(k.storeKey) + b := store.Get(append(types.EpochKeyPrefix, []byte(identifier)...)) + if b == nil { + return epoch + } + err := proto.Unmarshal(b, &epoch) + if err != nil { + panic(err) + } + return epoch +} + +// AddEpochInfo adds a new epoch info. Will return an error if the epoch fails validation, +// or re-uses an existing identifier. +// This method also sets the start time if left unset, and sets the epoch start height. +func (k Keeper) AddEpochInfo(ctx sdk.Context, epoch types.EpochInfo) error { + err := epoch.Validate() + if err != nil { + return err + } + // Check if identifier already exists + if (k.GetEpochInfo(ctx, epoch.Identifier) != types.EpochInfo{}) { + return fmt.Errorf("epoch with identifier %s already exists", epoch.Identifier) + } + + // Initialize empty and default epoch values + if epoch.StartTime.Equal(time.Time{}) { + epoch.StartTime = ctx.BlockTime() + } + epoch.CurrentEpochStartHeight = ctx.BlockHeight() + k.setEpochInfo(ctx, epoch) + return nil +} + +// setEpochInfo set epoch info. +func (k Keeper) setEpochInfo(ctx sdk.Context, epoch types.EpochInfo) { + store := ctx.KVStore(k.storeKey) + value, err := proto.Marshal(&epoch) + if err != nil { + panic(err) + } + store.Set(append(types.EpochKeyPrefix, []byte(epoch.Identifier)...), value) +} + +// DeleteEpochInfo delete epoch info. +func (k Keeper) DeleteEpochInfo(ctx sdk.Context, identifier string) { + store := ctx.KVStore(k.storeKey) + store.Delete(append(types.EpochKeyPrefix, []byte(identifier)...)) +} + +// IterateEpochInfo iterate through epochs. +func (k Keeper) IterateEpochInfo(ctx sdk.Context, fn func(index int64, epochInfo types.EpochInfo) (stop bool)) { + store := ctx.KVStore(k.storeKey) + + iterator := sdk.KVStorePrefixIterator(store, types.EpochKeyPrefix) + defer iterator.Close() + + i := int64(0) + + for ; iterator.Valid(); iterator.Next() { + epoch := types.EpochInfo{} + err := proto.Unmarshal(iterator.Value(), &epoch) + if err != nil { + panic(err) + } + stop := fn(i, epoch) + + if stop { + break + } + i++ + } +} + +// AllEpochInfos iterate through epochs to return all epochs info. +func (k Keeper) AllEpochInfos(ctx sdk.Context) []types.EpochInfo { + epochs := []types.EpochInfo{} + k.IterateEpochInfo(ctx, func(index int64, epochInfo types.EpochInfo) (stop bool) { + epochs = append(epochs, epochInfo) + return false + }) + return epochs +} + +// NumBlocksSinceEpochStart returns the number of blocks since the epoch started. +// if the epoch started on block N, then calling this during block N (after BeforeEpochStart) +// would return 0. +// Calling it any point in block N+1 (assuming the epoch doesn't increment) would return 1. 
+func (k Keeper) NumBlocksSinceEpochStart(ctx sdk.Context, identifier string) (int64, error) { + epoch := k.GetEpochInfo(ctx, identifier) + if (epoch == types.EpochInfo{}) { + return 0, fmt.Errorf("epoch with identifier %s not found", identifier) + } + return ctx.BlockHeight() - epoch.CurrentEpochStartHeight, nil +} + +func (k Keeper) AfterEpochEnd(ctx sdk.Context, epochInfo types.EpochInfo) { + if epochInfo.Identifier == types.DAY_EPOCH { + epochHour := uint64(epochInfo.CurrentEpoch) + + for _, rateLimit := range k.GetAllRateLimits(ctx) { + if epochHour%rateLimit.Quota.DurationHours == 0 { + err := k.ResetRateLimit(ctx, rateLimit.Path.Denom, rateLimit.Path.ChannelId) + if err != nil { + k.Logger(ctx).Error(fmt.Sprintf("Unable to reset quota for Denom: %s, ChannelId: %s", rateLimit.Path.Denom, rateLimit.Path.ChannelId)) + } + } + } + } +} diff --git a/x/ratelimit/keeper/genesis.go b/x/ratelimit/keeper/genesis.go new file mode 100644 index 000000000..c6da50481 --- /dev/null +++ b/x/ratelimit/keeper/genesis.go @@ -0,0 +1,48 @@ +package keeper + +import ( + "strconv" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/notional-labs/centauri/v4/x/ratelimit/types" +) + +func (k Keeper) InitGenesis(ctx sdk.Context, genState types.GenesisState) { + k.SetParams(ctx, genState.Params) + for _, rateLimit := range genState.RateLimits { + k.SetRateLimit(ctx, rateLimit) + } + for _, addressPair := range genState.WhitelistedAddressPairs { + k.SetWhitelistedAddressPair(ctx, addressPair) + } + for _, pendingPacketId := range genState.PendingSendPacketSequenceNumbers { + splits := strings.Split(pendingPacketId, "/") + if len(splits) != 2 { + panic("Invalid pending send packet, must be of form: {channelId}/{sequenceNumber}") + } + channelId := splits[0] + sequence, err := strconv.ParseUint(splits[1], 10, 64) + if err != nil { + panic(err) + } + k.SetPendingSendPacket(ctx, channelId, sequence) + } + for _, epoch := range genState.Epochs { + err := k.AddEpochInfo(ctx, epoch) + if err != nil { + panic(err) + } + } +} + +func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { + genesis := types.DefaultGenesisState() + + genesis.Params = k.GetParams(ctx) + genesis.RateLimits = k.GetAllRateLimits(ctx) + genesis.WhitelistedAddressPairs = k.GetAllWhitelistedAddressPairs(ctx) + genesis.PendingSendPacketSequenceNumbers = k.GetAllPendingSendPackets(ctx) + + return genesis +} diff --git a/x/ratelimit/keeper/grpc_query.go b/x/ratelimit/keeper/grpc_query.go new file mode 100644 index 000000000..7798e6a40 --- /dev/null +++ b/x/ratelimit/keeper/grpc_query.go @@ -0,0 +1,78 @@ +package keeper + +import ( + "context" + + errorsmod "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + ibctmtypes "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint" + "github.com/notional-labs/centauri/v4/x/ratelimit/types" +) + +var _ types.QueryServer = Keeper{} + +// Query all rate limits +func (k Keeper) AllRateLimits(goCtx context.Context, req *types.QueryAllRateLimitsRequest) (*types.QueryAllRateLimitsResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + rateLimits := k.GetAllRateLimits(ctx) + return &types.QueryAllRateLimitsResponse{RateLimits: rateLimits}, nil +} + +// Query a rate limit by denom and channelId +func (k Keeper) RateLimit(goCtx context.Context, req *types.QueryRateLimitRequest) (*types.QueryRateLimitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + rateLimit, found := 
k.GetRateLimit(ctx, req.Denom, req.ChannelId) + if !found { + return &types.QueryRateLimitResponse{}, nil + } + return &types.QueryRateLimitResponse{RateLimit: &rateLimit}, nil +} + +// Query all rate limits for a given chain +func (k Keeper) RateLimitsByChainId(goCtx context.Context, req *types.QueryRateLimitsByChainIdRequest) (*types.QueryRateLimitsByChainIdResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + rateLimits := []types.RateLimit{} + for _, rateLimit := range k.GetAllRateLimits(ctx) { + + // Determine the client state from the channel Id + _, clientState, err := k.channelKeeper.GetChannelClientState(ctx, transfertypes.PortID, rateLimit.Path.ChannelId) + if err != nil { + return &types.QueryRateLimitsByChainIdResponse{}, errorsmod.Wrapf(types.ErrInvalidClientState, "Unable to fetch client state from channelId") + } + client, ok := clientState.(*ibctmtypes.ClientState) + if !ok { + return &types.QueryRateLimitsByChainIdResponse{}, errorsmod.Wrapf(types.ErrInvalidClientState, "Client state is not tendermint") + } + + // If the chain ID matches, add the rate limit to the returned list + if client.ChainId == req.ChainId { + rateLimits = append(rateLimits, rateLimit) + } + } + + return &types.QueryRateLimitsByChainIdResponse{RateLimits: rateLimits}, nil +} + +// Query all rate limits for a given channel +func (k Keeper) RateLimitsByChannelId(goCtx context.Context, req *types.QueryRateLimitsByChannelIdRequest) (*types.QueryRateLimitsByChannelIdResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + rateLimits := []types.RateLimit{} + for _, rateLimit := range k.GetAllRateLimits(ctx) { + // If the channel ID matches, add the rate limit to the returned list + if rateLimit.Path.ChannelId == req.ChannelId { + rateLimits = append(rateLimits, rateLimit) + } + } + + return &types.QueryRateLimitsByChannelIdResponse{RateLimits: rateLimits}, nil +} + +// Query all whitelisted addresses +func (k Keeper) AllWhitelistedAddresses(goCtx context.Context, req *types.QueryAllWhitelistedAddressesRequest) (*types.QueryAllWhitelistedAddressesResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + whitelistedAddresses := k.GetAllWhitelistedAddressPairs(ctx) + return &types.QueryAllWhitelistedAddressesResponse{AddressPairs: whitelistedAddresses}, nil +} diff --git a/x/ratelimit/keeper/keeper.go b/x/ratelimit/keeper/keeper.go new file mode 100644 index 000000000..0962b61b5 --- /dev/null +++ b/x/ratelimit/keeper/keeper.go @@ -0,0 +1,65 @@ +package keeper + +import ( + "fmt" + + "github.com/cometbft/cometbft/libs/log" + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" + porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types" + "github.com/notional-labs/centauri/v4/x/ratelimit/types" + tfmwkeeper "github.com/notional-labs/centauri/v4/x/transfermiddleware/keeper" +) + +type Keeper struct { + storeKey storetypes.StoreKey + cdc codec.BinaryCodec + paramstore paramtypes.Subspace + + bankKeeper types.BankKeeper + channelKeeper types.ChannelKeeper + ics4Wrapper porttypes.ICS4Wrapper + tfmwKeeper tfmwkeeper.Keeper + + // the address capable of executing a AddParachainIBCTokenInfo and RemoveParachainIBCTokenInfo message. Typically, this + // should be the x/gov module account. 
+ authority string +} + +func NewKeeper( + cdc codec.BinaryCodec, + key storetypes.StoreKey, + ps paramtypes.Subspace, + bankKeeper types.BankKeeper, + channelKeeper types.ChannelKeeper, + ics4Wrapper porttypes.ICS4Wrapper, + tfmwKeeper tfmwkeeper.Keeper, + authority string, +) *Keeper { + return &Keeper{ + cdc: cdc, + storeKey: key, + paramstore: ps, + bankKeeper: bankKeeper, + channelKeeper: channelKeeper, + ics4Wrapper: ics4Wrapper, + tfmwKeeper: tfmwKeeper, + authority: authority, + } +} + +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +// GetParams gets all parameters as types.Params +func (k Keeper) GetParams(ctx sdk.Context) types.Params { + return types.NewParams() +} + +// SetParams sets the params +func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + k.paramstore.SetParamSet(ctx, &params) +} diff --git a/x/ratelimit/keeper/msg_server.go b/x/ratelimit/keeper/msg_server.go new file mode 100644 index 000000000..ece478fde --- /dev/null +++ b/x/ratelimit/keeper/msg_server.go @@ -0,0 +1,83 @@ +package keeper + +import ( + "context" + + "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + "github.com/notional-labs/centauri/v4/x/ratelimit/types" +) + +var _ types.MsgServer = msgServer{} + +// NewMsgServerImpl returns an implementation of the MsgServer interface +// for the provided Keeper. +func NewMsgServerImpl(keeper Keeper) types.MsgServer { + return &msgServer{ + Keeper: keeper, + } +} + +type msgServer struct { + Keeper +} + +func (k Keeper) AddTransferRateLimit(goCtx context.Context, msg *types.MsgAddRateLimit) (*types.MsgAddRateLimitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if k.authority != msg.Authority { + return nil, errors.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Authority) + } + + err := k.AddRateLimit(ctx, msg) + if err != nil { + return nil, err + } + + return &types.MsgAddRateLimitResponse{}, nil +} + +func (k Keeper) UpdateTransferRateLimit(goCtx context.Context, msg *types.MsgUpdateRateLimit) (*types.MsgUpdateRateLimitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if k.authority != msg.Authority { + return nil, errors.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Authority) + } + + err := k.UpdateRateLimit(ctx, msg) + if err != nil { + return nil, err + } + + return &types.MsgUpdateRateLimitResponse{}, nil +} + +func (k Keeper) RemoveTransferRateLimit(goCtx context.Context, msg *types.MsgRemoveRateLimit) (*types.MsgRemoveRateLimitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if k.authority != msg.Authority { + return nil, errors.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Authority) + } + + err := k.RemoveRateLimit(ctx, msg.Denom, msg.ChannelId) + if err != nil { + return nil, err + } + + return &types.MsgRemoveRateLimitResponse{}, nil +} + +func (k Keeper) ResetTransferRateLimit(goCtx context.Context, msg *types.MsgResetRateLimit) (*types.MsgResetRateLimitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if k.authority != msg.Authority { + return nil, errors.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Authority) + } + + err := k.ResetRateLimit(ctx, msg.Denom, msg.ChannelId) + if err != nil { + return nil, err + } + return &types.MsgResetRateLimitResponse{}, nil +}
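As a usage sketch for the msg server above: a MsgAddRateLimit is only accepted when its Authority matches the keeper's configured authority (typically the x/gov module account). The field names below mirror how AddRateLimit reads the message later in this diff; the exact proto field types live in x/ratelimit/types and are not shown here, so the sdkmath.Int percentages and the denom/channel values are assumptions for illustration.

package main

import (
	"fmt"

	"cosmossdk.io/math"

	ratelimittypes "github.com/notional-labs/centauri/v4/x/ratelimit/types"
)

// exampleAddRateLimit builds a hypothetical MsgAddRateLimit capping transfers on one
// channel to 30% of channel value in each direction over a 24h window.
func exampleAddRateLimit(govModuleAddr string) *ratelimittypes.MsgAddRateLimit {
	return &ratelimittypes.MsgAddRateLimit{
		Authority:      govModuleAddr,   // must equal the keeper's authority or the handler rejects it
		Denom:          "ppica",         // placeholder denom
		ChannelId:      "channel-0",     // placeholder channel
		MaxPercentSend: math.NewInt(30), // assumed sdkmath.Int percentage, per the Quota usage in this diff
		MaxPercentRecv: math.NewInt(30),
		DurationHours:  24,
	}
}

func main() {
	// Placeholder authority string; in practice this is the x/gov module account address.
	msg := exampleAddRateLimit("gov-module-address-placeholder")
	fmt.Printf("%+v\n", msg)
}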
diff --git a/x/ratelimit/keeper/packet.go b/x/ratelimit/keeper/packet.go new file mode 100644 index 000000000..3bfca4baa --- /dev/null +++ b/x/ratelimit/keeper/packet.go @@ -0,0 +1,279 @@ +package keeper + +import ( + "encoding/json" + "fmt" + + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported" + + "github.com/notional-labs/centauri/v4/x/ratelimit/types" +) + +type RateLimitedPacketInfo struct { + ChannelID string + Denom string + Amount math.Int + Sender string + Receiver string +} + +// Parse the denom from the Send Packet that will be used by the rate limit module +// The denom that the rate limiter will use for a SEND packet depends on whether +// it was a NATIVE token (e.g. ustrd, stuatom, etc.) or NON-NATIVE token (e.g. ibc/...)... +// +// We can identify if the token is native or not by parsing the trace denom from the packet +// If the token is NATIVE, it will not have a prefix (e.g. ustrd), +// and if it is NON-NATIVE, it will have a prefix (e.g. transfer/channel-2/uosmo) +// +// For NATIVE denoms, return as is (e.g. ustrd) +// For NON-NATIVE denoms, take the ibc hash (e.g. hash "transfer/channel-2/usoms" into "ibc/...") +func (k Keeper) ParseDenomFromSendPacket(packet transfertypes.FungibleTokenPacketData) (denom string) { + // Determine the denom by looking at the denom trace path + denomTrace := transfertypes.ParseDenomTrace(packet.Denom) + + // Native assets will have an empty trace path and can be returned as is + if denomTrace.Path == "" { + denom = packet.Denom + } else { + // Non-native assets should be hashed + denom = denomTrace.IBCDenom() + } + + return denom +} + +// Parse the denom from the Recv Packet that will be used by the rate limit module +// The denom that the rate limiter will use for a RECEIVE packet depends on whether it was a source or sink +// +// Sink: The token moves forward, to a chain different than its previous hop +// The new port and channel are APPENDED to the denom trace. +// (e.g. A -> B, B is a sink) (e.g. A -> B -> C, C is a sink) +// +// Source: The token moves backwards (i.e. revisits the last chain it was sent from) +// The port and channel are REMOVED from the denom trace - undoing the last hop. +// (e.g. A -> B -> A, A is a source) (e.g. A -> B -> C -> B, B is a source) +// +// If the chain is acting as a SINK: We add on the Stride port and channel and hash it +// Ex1: uosmo sent from Osmosis to Stride +// Packet Denom: uosmo +// -> Add Prefix: transfer/channel-X/uosmo +// -> Hash: ibc/... +// +// Ex2: ujuno sent from Osmosis to Stride +// PacketDenom: transfer/channel-Y/ujuno (channel-Y is the Juno <> Osmosis channel) +// -> Add Prefix: transfer/channel-X/transfer/channel-Y/ujuno +// -> Hash: ibc/... +// +// If the chain is acting as a SOURCE: First, remove the prefix. 
Then if there is still a denom trace, hash it +// Ex1: ustrd sent back to Stride from Osmosis +// Packet Denom: transfer/channel-X/ustrd +// -> Remove Prefix: ustrd +// -> Leave as is: ustrd +// +// Ex2: juno was sent to Stride, then to Osmosis, then back to Stride +// Packet Denom: transfer/channel-X/transfer/channel-Z/ujuno +// -> Remove Prefix: transfer/channel-Z/ujuno +// -> Hash: ibc/... +func (k Keeper) ParseDenomFromRecvPacket(packet channeltypes.Packet, packetData transfertypes.FungibleTokenPacketData) (denom string) { + // To determine the denom, first check whether Stride is acting as source + if transfertypes.ReceiverChainIsSource(packet.GetSourcePort(), packet.GetSourceChannel(), packetData.Denom) { + // Remove the source prefix (e.g. transfer/channel-X/transfer/channel-Z/ujuno -> transfer/channel-Z/ujuno) + sourcePrefix := transfertypes.GetDenomPrefix(packet.GetSourcePort(), packet.GetSourceChannel()) + unprefixedDenom := packetData.Denom[len(sourcePrefix):] + + // Native assets will have an empty trace path and can be returned as is + denomTrace := transfertypes.ParseDenomTrace(unprefixedDenom) + if denomTrace.Path == "" { + denom = unprefixedDenom + } else { + // Non-native assets should be hashed + denom = denomTrace.IBCDenom() + } + } else { + // Prefix the destination channel - this will contain the trailing slash (e.g. transfer/channel-X/) + destinationPrefix := transfertypes.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel()) + prefixedDenom := destinationPrefix + packetData.Denom + + // Hash the denom trace + denomTrace := transfertypes.ParseDenomTrace(prefixedDenom) + denom = denomTrace.IBCDenom() + } + + return denom +} + +// Parses the channelId and denom for the corresponding RateLimit object, as well as +// the sender/receiver/transfer amount +// +// The Stride channelID should always be used as the key for the RateLimit object (not the counterparty channelID) +// For a SEND packet, the Stride channelID is the SOURCE channel +// For a RECEIVE packet, the Stride channelID is the DESTINATION channel +// +// The Source and Destination are defined from the perspective of a packet recipient +// Meaning, when a send packet lands on the host chain, the "Source" will be the Stride Channel, +// and the "Destination" will be the Host Channel +// And, when a receive packet lands on Stride, the "Source" will be the host zone's channel, +// and the "Destination" will be the Stride Channel +func (k Keeper) ParsePacketInfo(packet channeltypes.Packet, direction types.PacketDirection) (RateLimitedPacketInfo, error) { + var packetData transfertypes.FungibleTokenPacketData + if err := json.Unmarshal(packet.GetData(), &packetData); err != nil { + return RateLimitedPacketInfo{}, err + } + + var channelID, denom string + if direction == types.PACKET_SEND { + channelID = packet.GetSourceChannel() + denom = k.ParseDenomFromSendPacket(packetData) + } else { + channelID = packet.GetDestChannel() + denom = k.ParseDenomFromRecvPacket(packet, packetData) + } + + amount, ok := sdk.NewIntFromString(packetData.Amount) + if !ok { + return RateLimitedPacketInfo{}, + errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "Unable to cast packet amount '%s' to sdkmath.Int", packetData.Amount) + } + + packetInfo := RateLimitedPacketInfo{ + ChannelID: channelID, + Denom: denom, + Amount: amount, + Sender: packetData.Sender, + Receiver: packetData.Receiver, + } + + return packetInfo, nil +} + +// Middleware implementation for SendPacket with rate limiting +// Checks whether the rate limit has
been exceeded - and if it hasn't, sends the packet +func (k Keeper) SendRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet) error { + packetInfo, err := k.ParsePacketInfo(packet, types.PACKET_SEND) + if err != nil { + return err + } + // Check if the packet would exceed the outflow rate limit + updatedFlow, err := k.CheckRateLimitAndUpdateFlow(ctx, types.PACKET_SEND, packetInfo) + if err != nil { + return err + } + // Store the sequence number of the packet so that if the transfer fails, + // we can identify if it was sent during this quota and can revert the outflow + if updatedFlow { + k.SetPendingSendPacket(ctx, packetInfo.ChannelID, packet.Sequence) + } + + return nil +} + +// Middleware implementation for RecvPacket with rate limiting +// Checks whether the rate limit has been exceeded - and if it hasn't, allows the packet +func (k Keeper) ReceiveRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet) error { + packetInfo, err := k.ParsePacketInfo(packet, types.PACKET_RECV) + if err != nil { + return err + } + + _, err = k.CheckRateLimitAndUpdateFlow(ctx, types.PACKET_RECV, packetInfo) + return err +} + +// Middleware implementation for OnAckPacket with rate limiting +// If the packet failed, we should decrement the Outflow +func (k Keeper) AcknowledgeRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet, ack []byte) error { + // Parse the denom, channelId, and amount from the packet + packetInfo, err := k.ParsePacketInfo(packet, types.PACKET_SEND) + if err != nil { + return err + } + + var acknowledgement channeltypes.Acknowledgement + if err := transfertypes.ModuleCdc.UnmarshalJSON(ack, &acknowledgement); err != nil { + return errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet acknowledgement: %s", err.Error()) + } + + // The ack can come back as either AcknowledgementResult or AcknowledgementError + // If it comes back as AcknowledgementResult, the messages are encoded differently depending on the SDK version + switch response := acknowledgement.Response.(type) { + case *channeltypes.Acknowledgement_Result: + if len(response.Result) == 0 { + return errorsmod.Wrapf(channeltypes.ErrInvalidAcknowledgement, "acknowledgement result cannot be empty") + } + // If the ack was successful, remove the pending packet + k.RemovePendingSendPacket(ctx, packetInfo.ChannelID, packet.Sequence) + return nil + default: + // If the ack failed, undo the change to the rate limit Outflow + return k.UndoSendPacket(ctx, packetInfo.ChannelID, packet.Sequence, packetInfo.Denom, packetInfo.Amount) + } +} + +// Middleware implementation for OnTimeoutPacket with rate limiting +// The Outflow should be decremented for the timed-out packet +func (k Keeper) TimeoutRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet) error { + packetInfo, err := k.ParsePacketInfo(packet, types.PACKET_SEND) + if err != nil { + return err + } + + return k.UndoSendPacket(ctx, packetInfo.ChannelID, packet.Sequence, packetInfo.Denom, packetInfo.Amount) +} + +// SendPacket wraps IBC ChannelKeeper's SendPacket function +// If the packet does not get rate limited, it passes the packet to the IBC Channel keeper +func (k Keeper) SendPacket( + ctx sdk.Context, + channelCap *capabilitytypes.Capability, + sourcePort string, + sourceChannel string, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, + data []byte, +) (sequence uint64, err error) { + // The packet must first be sent up the stack to get the sequence number from the channel keeper + sequence, err =
k.ics4Wrapper.SendPacket( + ctx, + channelCap, + sourcePort, + sourceChannel, + timeoutHeight, + timeoutTimestamp, + data, + ) + if err != nil { + return sequence, err + } + err = k.SendRateLimitedPacket(ctx, channeltypes.Packet{ + Sequence: sequence, + SourceChannel: sourceChannel, + SourcePort: sourcePort, + TimeoutHeight: timeoutHeight, + TimeoutTimestamp: timeoutTimestamp, + Data: data, + }) + if err != nil { + k.Logger(ctx).Error(fmt.Sprintf("ICS20 packet send was denied: %s", err.Error())) + return 0, err + } + + return sequence, err +} + +// WriteAcknowledgement wraps IBC ChannelKeeper's WriteAcknowledgement function +func (k Keeper) WriteAcknowledgement(ctx sdk.Context, chanCap *capabilitytypes.Capability, packet ibcexported.PacketI, acknowledgement ibcexported.Acknowledgement) error { + return k.ics4Wrapper.WriteAcknowledgement(ctx, chanCap, packet, acknowledgement) +} + +// GetAppVersion wraps IBC ChannelKeeper's GetAppVersion function +func (k Keeper) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + return k.ics4Wrapper.GetAppVersion(ctx, portID, channelID) +} diff --git a/x/ratelimit/keeper/rate_limit.go b/x/ratelimit/keeper/rate_limit.go new file mode 100644 index 000000000..ca3b09c9d --- /dev/null +++ b/x/ratelimit/keeper/rate_limit.go @@ -0,0 +1,392 @@ +package keeper + +import ( + "encoding/binary" + "fmt" + "strings" + + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/notional-labs/centauri/v4/x/ratelimit/types" +) + +// Get the rate limit byte key built from the denom and channelId +func GetRateLimitItemKey(denom string, channelId string) []byte { + return append(types.KeyPrefix(denom), types.KeyPrefix(channelId)...) 
+} + +// The total value on a given path (aka, the denominator in the percentage calculation) +// is the total supply of the given denom +func (k Keeper) GetChannelValue(ctx sdk.Context, denom string) math.Int { + return k.bankKeeper.GetSupply(ctx, denom).Amount +} + +// If the rate limit is exceeded or the denom is blacklisted, we emit an event +func EmitTransferDeniedEvent(ctx sdk.Context, reason, denom, channelId string, direction types.PacketDirection, amount math.Int, err error) { + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTransferDenied, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(types.AttributeKeyReason, reason), + sdk.NewAttribute(types.AttributeKeyAction, strings.ToLower(direction.String())), // packet_send or packet_recv + sdk.NewAttribute(types.AttributeKeyDenom, denom), + sdk.NewAttribute(types.AttributeKeyChannel, channelId), + sdk.NewAttribute(types.AttributeKeyAmount, amount.String()), + sdk.NewAttribute(types.AttributeKeyError, err.Error()), + ), + ) +} + +// Adds an amount to the flow in either the SEND or RECV direction +func (k Keeper) UpdateFlow(rateLimit types.RateLimit, direction types.PacketDirection, amount math.Int) error { + switch direction { + case types.PACKET_SEND: + return rateLimit.Flow.AddOutflow(amount, *rateLimit.Quota) + case types.PACKET_RECV: + return rateLimit.Flow.AddInflow(amount, *rateLimit.Quota) + default: + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid packet direction (%s)", direction.String()) + } +} + +// Checks whether the given packet will exceed the rate limit +// Called by OnRecvPacket and OnSendPacket +func (k Keeper) CheckRateLimitAndUpdateFlow( + ctx sdk.Context, + direction types.PacketDirection, + packetInfo RateLimitedPacketInfo, +) (updatedFlow bool, err error) { + denom := packetInfo.Denom + channelId := packetInfo.ChannelID + amount := packetInfo.Amount + + // If there's no rate limit yet for this denom, no action is necessary + rateLimit, found := k.GetRateLimit(ctx, denom, channelId) + if !found { + return false, nil + } + + // Check if the sender/receiver pair is whitelisted + // If so, return a success without modifying the quota + if k.IsAddressPairWhitelisted(ctx, packetInfo.Sender, packetInfo.Receiver) { + return false, nil + } + // Update the flow object with the change in amount + if err := k.UpdateFlow(rateLimit, direction, amount); err != nil { + // If the rate limit was exceeded, emit an event + EmitTransferDeniedEvent(ctx, types.EventRateLimitExceeded, denom, channelId, direction, amount, err) + return false, err + } + // If there's no quota error, update the rate limit object in the store with the new flow + k.SetRateLimit(ctx, rateLimit) + + return true, nil +} + +// If a SendPacket fails or times out, undo the outflow increment that happened during the send +func (k Keeper) UndoSendPacket(ctx sdk.Context, channelId string, sequence uint64, denom string, amount math.Int) error { + rateLimit, found := k.GetRateLimit(ctx, denom, channelId) + if !found { + return nil + } + + // If the packet was sent during this quota, decrement the outflow + // Otherwise, it can be ignored + if k.CheckPacketSentDuringCurrentQuota(ctx, channelId, sequence) { + rateLimit.Flow.Outflow = rateLimit.Flow.Outflow.Sub(amount) + k.SetRateLimit(ctx, rateLimit) + + k.RemovePendingSendPacket(ctx, channelId, sequence) + } + + return nil +} + +// Reset the rate limit after expiration +// The inflow and outflow should get reset to 0, the channelValue should be updated, +// and 
all pending send packet sequence numbers should be removed +func (k Keeper) ResetRateLimit(ctx sdk.Context, denom string, channelId string) error { + if k.tfmwKeeper.HasParachainIBCTokenInfoByNativeDenom(ctx, denom) { + tokenInfo := k.tfmwKeeper.GetParachainIBCTokenInfoByNativeDenom(ctx, denom) + if channelId == tokenInfo.ChannelId { + denom = tokenInfo.IbcDenom + } + } + + rateLimit, found := k.GetRateLimit(ctx, denom, channelId) + if !found { + return types.ErrRateLimitNotFound + } + + flow := types.Flow{ + Inflow: math.ZeroInt(), + Outflow: math.ZeroInt(), + ChannelValue: k.GetChannelValue(ctx, denom), + } + rateLimit.Flow = &flow + + k.SetRateLimit(ctx, rateLimit) + k.RemoveAllChannelPendingSendPackets(ctx, channelId) + return nil +} + +// Stores/Updates a rate limit object in the store +func (k Keeper) SetRateLimit(ctx sdk.Context, rateLimit types.RateLimit) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RateLimitKeyPrefix) + + rateLimitKey := GetRateLimitItemKey(rateLimit.Path.Denom, rateLimit.Path.ChannelId) + rateLimitValue := k.cdc.MustMarshal(&rateLimit) + + store.Set(rateLimitKey, rateLimitValue) +} + +// Removes a rate limit object from the store using denom and channel-id +func (k Keeper) RemoveRateLimit(ctx sdk.Context, denom string, channelId string) error { + if k.tfmwKeeper.HasParachainIBCTokenInfoByNativeDenom(ctx, denom) { + tokenInfo := k.tfmwKeeper.GetParachainIBCTokenInfoByNativeDenom(ctx, denom) + if channelId == tokenInfo.ChannelId { + denom = tokenInfo.IbcDenom + } + } + + _, found := k.GetRateLimit(ctx, denom, channelId) + if !found { + return types.ErrRateLimitNotFound + } + + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RateLimitKeyPrefix) + rateLimitKey := GetRateLimitItemKey(denom, channelId) + store.Delete(rateLimitKey) + + return nil +} + +// Grabs and returns a rate limit object from the store using denom and channel-id +func (k Keeper) GetRateLimit(ctx sdk.Context, denom string, channelId string) (rateLimit types.RateLimit, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RateLimitKeyPrefix) + + rateLimitKey := GetRateLimitItemKey(denom, channelId) + rateLimitValue := store.Get(rateLimitKey) + + if len(rateLimitValue) == 0 { + return rateLimit, false + } + + k.cdc.MustUnmarshal(rateLimitValue, &rateLimit) + return rateLimit, true +} + +// AddRateLimit +func (k Keeper) AddRateLimit(ctx sdk.Context, msg *types.MsgAddRateLimit) error { + // Check if this is denom - channel transfer from Picasso + denom := msg.Denom + if k.tfmwKeeper.HasParachainIBCTokenInfoByNativeDenom(ctx, denom) { + tokenInfo := k.tfmwKeeper.GetParachainIBCTokenInfoByNativeDenom(ctx, denom) + if msg.ChannelId == tokenInfo.ChannelId { + denom = tokenInfo.IbcDenom + } + } + // Confirm the channel value is not zero + channelValue := k.GetChannelValue(ctx, denom) + if channelValue.IsZero() { + return errorsmod.Wrap(types.ErrZeroChannelValue, "zero channel value") + } + + // Confirm the rate limit does not already exist + _, found := k.GetRateLimit(ctx, denom, msg.ChannelId) + if found { + return errorsmod.Wrap(types.ErrRateLimitAlreadyExists, "rate limit already exists") + } + + // Create and store the rate limit object + path := types.Path{ + Denom: denom, + ChannelId: msg.ChannelId, + } + quota := types.Quota{ + MaxPercentSend: msg.MaxPercentSend, + MaxPercentRecv: msg.MaxPercentRecv, + DurationHours: msg.DurationHours, + } + flow := types.Flow{ + Inflow: math.ZeroInt(), + Outflow: math.ZeroInt(), + ChannelValue: channelValue, + } + + 
k.SetRateLimit(ctx, types.RateLimit{
+		Path:  &path,
+		Quota: &quota,
+		Flow:  &flow,
+	})
+
+	return nil
+}
+
+// UpdateRateLimit
+func (k Keeper) UpdateRateLimit(ctx sdk.Context, msg *types.MsgUpdateRateLimit) error {
+	// Check if this is denom - channel transfer from Picasso
+	denom := msg.Denom
+	if k.tfmwKeeper.HasParachainIBCTokenInfoByNativeDenom(ctx, denom) {
+		tokenInfo := k.tfmwKeeper.GetParachainIBCTokenInfoByNativeDenom(ctx, denom)
+		if msg.ChannelId == tokenInfo.ChannelId {
+			denom = tokenInfo.IbcDenom
+		}
+	}
+
+	// Confirm the rate limit exists
+	_, found := k.GetRateLimit(ctx, denom, msg.ChannelId)
+	if !found {
+		return errorsmod.Wrap(types.ErrRateLimitNotFound, "rate limit not found")
+	}
+
+	// Update the rate limit object with the new quota information
+	// The flow should also get reset to 0
+	path := types.Path{
+		Denom:     denom,
+		ChannelId: msg.ChannelId,
+	}
+	quota := types.Quota{
+		MaxPercentSend: msg.MaxPercentSend,
+		MaxPercentRecv: msg.MaxPercentRecv,
+		DurationHours:  msg.DurationHours,
+	}
+	flow := types.Flow{
+		Inflow:       math.ZeroInt(),
+		Outflow:      math.ZeroInt(),
+		ChannelValue: k.GetChannelValue(ctx, denom),
+	}
+
+	k.SetRateLimit(ctx, types.RateLimit{
+		Path:  &path,
+		Quota: &quota,
+		Flow:  &flow,
+	})
+
+	return nil
+}
+
+// Returns all rate limits stored
+func (k Keeper) GetAllRateLimits(ctx sdk.Context) []types.RateLimit {
+	store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RateLimitKeyPrefix)
+
+	iterator := store.Iterator(nil, nil)
+	defer iterator.Close()
+
+	allRateLimits := []types.RateLimit{}
+	for ; iterator.Valid(); iterator.Next() {
+
+		rateLimit := types.RateLimit{}
+		k.cdc.MustUnmarshal(iterator.Value(), &rateLimit)
+		allRateLimits = append(allRateLimits, rateLimit)
+	}
+
+	return allRateLimits
+}
+
+// Sets the sequence number of a packet that was just sent
+func (k Keeper) SetPendingSendPacket(ctx sdk.Context, channelId string, sequence uint64) {
+	store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PendingSendPacketPrefix)
+	key := types.GetPendingSendPacketKey(channelId, sequence)
+	store.Set(key, []byte{1})
+}
+
+// Remove a pending packet sequence number from the store
+// Used after the ack or timeout for a packet has been received
+func (k Keeper) RemovePendingSendPacket(ctx sdk.Context, channelId string, sequence uint64) {
+	store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PendingSendPacketPrefix)
+	key := types.GetPendingSendPacketKey(channelId, sequence)
+	store.Delete(key)
+}
+
+// Checks whether the packet sequence number is in the store - indicating that it was
+// sent during the current quota
+func (k Keeper) CheckPacketSentDuringCurrentQuota(ctx sdk.Context, channelId string, sequence uint64) bool {
+	store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PendingSendPacketPrefix)
+	key := types.GetPendingSendPacketKey(channelId, sequence)
+	valueBz := store.Get(key)
+	found := len(valueBz) != 0
+	return found
+}
+
+// Get all pending packet sequence numbers
+func (k Keeper) GetAllPendingSendPackets(ctx sdk.Context) []string {
+	store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PendingSendPacketPrefix)
+
+	iterator := store.Iterator(nil, nil)
+	defer iterator.Close()
+
+	pendingPackets := []string{}
+	for ; iterator.Valid(); iterator.Next() {
+		key := iterator.Key()
+
+		channelId := string(key[:types.PendingSendPacketChannelLength])
+		channelId = strings.TrimRight(channelId, "\x00") // removes null bytes from suffix
+		sequence := binary.BigEndian.Uint64(key[types.PendingSendPacketChannelLength:])
+
+		packetId := fmt.Sprintf("%s/%d", channelId, sequence)
+		pendingPackets = append(pendingPackets, packetId)
+	}
+
+	return pendingPackets
+}
+
+// Remove all pending sequence numbers from the store
+// This is executed when the quota resets
+func (k Keeper) RemoveAllChannelPendingSendPackets(ctx sdk.Context, channelId string) {
+	store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PendingSendPacketPrefix)
+
+	iterator := sdk.KVStorePrefixIterator(store, types.KeyPrefix(channelId))
+	defer iterator.Close()
+
+	for ; iterator.Valid(); iterator.Next() {
+		store.Delete(iterator.Key())
+	}
+}
+
+// Adds a pair of sender and receiver addresses to the whitelist to allow all
+// IBC transfers between those addresses to skip all flow calculations
+func (k Keeper) SetWhitelistedAddressPair(ctx sdk.Context, whitelist types.WhitelistedAddressPair) {
+	store := prefix.NewStore(ctx.KVStore(k.storeKey), types.AddressWhitelistKeyPrefix)
+	key := types.GetAddressWhitelistKey(whitelist.Sender, whitelist.Receiver)
+	value := k.cdc.MustMarshal(&whitelist)
+	store.Set(key, value)
+}
+
+// Removes a whitelisted address pair so that its transfers are counted in the quota
+func (k Keeper) RemoveWhitelistedAddressPair(ctx sdk.Context, sender, receiver string) {
+	store := prefix.NewStore(ctx.KVStore(k.storeKey), types.AddressWhitelistKeyPrefix)
+	key := types.GetAddressWhitelistKey(sender, receiver)
+	store.Delete(key)
+}
+
+// Check if a sender/receiver address pair is currently whitelisted
+func (k Keeper) IsAddressPairWhitelisted(ctx sdk.Context, sender, receiver string) bool {
+	store := prefix.NewStore(ctx.KVStore(k.storeKey), types.AddressWhitelistKeyPrefix)
+
+	key := types.GetAddressWhitelistKey(sender, receiver)
+	value := store.Get(key)
+	found := len(value) != 0
+
+	return found
+}
+
+// Get all the whitelisted addresses
+func (k Keeper) GetAllWhitelistedAddressPairs(ctx sdk.Context) []types.WhitelistedAddressPair {
+	store := prefix.NewStore(ctx.KVStore(k.storeKey), types.AddressWhitelistKeyPrefix)
+
+	iterator := store.Iterator(nil, nil)
+	defer iterator.Close()
+
+	allWhitelistedAddresses := []types.WhitelistedAddressPair{}
+	for ; iterator.Valid(); iterator.Next() {
+		whitelist := types.WhitelistedAddressPair{}
+		k.cdc.MustUnmarshal(iterator.Value(), &whitelist)
+		allWhitelistedAddresses = append(allWhitelistedAddresses, whitelist)
+	}
+
+	return allWhitelistedAddresses
+}
diff --git a/x/ratelimit/module.go b/x/ratelimit/module.go
new file mode 100644
index 000000000..6c51bc925
--- /dev/null
+++ b/x/ratelimit/module.go
@@ -0,0 +1,158 @@
+package ratelimit
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+
+	abci "github.com/cometbft/cometbft/abci/types"
+	"github.com/cosmos/cosmos-sdk/client"
+	"github.com/cosmos/cosmos-sdk/codec"
+	codectypes "github.com/cosmos/cosmos-sdk/codec/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+	"github.com/cosmos/cosmos-sdk/types/module"
+	simtypes "github.com/cosmos/cosmos-sdk/types/simulation"
+	"github.com/gorilla/mux"
+	"github.com/grpc-ecosystem/grpc-gateway/runtime"
+	"github.com/notional-labs/centauri/v4/x/ratelimit/client/cli"
+	"github.com/notional-labs/centauri/v4/x/ratelimit/keeper"
+	"github.com/notional-labs/centauri/v4/x/ratelimit/types"
+	"github.com/spf13/cobra"
+)
+
+var (
+	_ module.AppModule           = AppModule{}
+	_ module.AppModuleBasic      = AppModuleBasic{}
+	_ module.AppModuleSimulation = AppModule{}
+)
+
+// AppModuleBasic is the router AppModuleBasic
+type AppModuleBasic struct{}
+
+// Name implements AppModuleBasic interface
+func (AppModuleBasic) Name() 
string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec implements AppModuleBasic interface +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterLegacyAminoCodec(cdc) +} + +// RegisterInterfaces registers module concrete types into protobuf Any. +func (AppModuleBasic) RegisterInterfaces(reg codectypes.InterfaceRegistry) { + types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns default genesis state as raw bytes for the ibc +// router module. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesisState()) +} + +// RegisterRESTRoutes implements AppModuleBasic interface +func (AppModuleBasic) RegisterRESTRoutes(_ client.Context, _ *mux.Router) {} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + if err := types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)); err != nil { + panic(err) + } +} + +// GetTxCmd implements AppModuleBasic interface +func (AppModuleBasic) GetTxCmd() *cobra.Command { + return nil +} + +// GetQueryCmd implements AppModuleBasic interface +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// AppModule represents the AppModule for this module +type AppModule struct { + AppModuleBasic + keeper *keeper.Keeper +} + +// NewAppModule creates a new router module +func NewAppModule(k *keeper.Keeper) AppModule { + return AppModule{ + AppModuleBasic: AppModuleBasic{}, + keeper: k, + } +} + +// Name returns the capability module's name. +func (am AppModule) Name() string { + return am.AppModuleBasic.Name() +} + +// QuerierRoute implements the AppModule interface +func (AppModule) QuerierRoute() string { + return types.QuerierRoute +} + +// RegisterServices registers module services. +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) + types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(*am.keeper)) +} + +// InitGenesis performs genesis initialization for the ibc-router module. It returns +// no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate { + var genesisState types.GenesisState + cdc.MustUnmarshalJSON(data, &genesisState) + am.keeper.InitGenesis(ctx, genesisState) + return []abci.ValidatorUpdate{} +} + +// ValidateGenesis performs genesis state validation for the mint module. +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var data types.GenesisState + if err := cdc.UnmarshalJSON(bz, &data); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + + return types.ValidateGenesis(data) +} + +// ExportGenesis returns the exported genesis state as raw bytes for the ibc-router +// module. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + gs := am.keeper.ExportGenesis(ctx) + return cdc.MustMarshalJSON(gs) +} + +// ConsensusVersion implements AppModule/ConsensusVersion. 
+func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock implements the AppModule interface +func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { + am.keeper.BeginBlocker(ctx) +} + +// EndBlock implements the AppModule interface +func (am AppModule) EndBlock(_ sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} + +// AppModuleSimulation functions + +// GenerateGenesisState creates a randomized GenState of the router module. +func (AppModule) GenerateGenesisState(_ *module.SimulationState) {} + +// ProposalContents doesn't return any content functions for governance proposals. +func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalContent { //nolint:staticcheck // WeightedProposalContent is necessary to satisfy the module interface + return nil +} + +// RegisterStoreDecoder registers a decoder for router module's types +func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} + +// WeightedOperations returns the all the router module operations with their respective weights. +func (am AppModule) WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation { + return nil +} diff --git a/x/ratelimit/relay_test.go b/x/ratelimit/relay_test.go new file mode 100644 index 000000000..50d9ec1b4 --- /dev/null +++ b/x/ratelimit/relay_test.go @@ -0,0 +1,261 @@ +package ratelimit_test + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + customibctesting "github.com/notional-labs/centauri/v4/app/ibctesting" + ratelimittypes "github.com/notional-labs/centauri/v4/x/ratelimit/types" + "github.com/stretchr/testify/suite" +) + +type RateLimitTestSuite struct { + suite.Suite + + coordinator *customibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *customibctesting.TestChain + chainB *customibctesting.TestChain + chainC *customibctesting.TestChain +} + +func (suite *RateLimitTestSuite) SetupTest() { + suite.coordinator = customibctesting.NewCoordinator(suite.T(), 4) + suite.chainA = suite.coordinator.GetChain(customibctesting.GetChainID(1)) + suite.chainB = suite.coordinator.GetChain(customibctesting.GetChainID(2)) + suite.chainC = suite.coordinator.GetChain(customibctesting.GetChainID(3)) +} + +func NewTransferPath(chainA, chainB *customibctesting.TestChain) *customibctesting.Path { + path := customibctesting.NewPath(chainA, chainB) + path.EndpointA.ChannelConfig.PortID = customibctesting.TransferPort + path.EndpointB.ChannelConfig.PortID = customibctesting.TransferPort + path.EndpointA.ChannelConfig.Version = transfertypes.Version + path.EndpointB.ChannelConfig.Version = transfertypes.Version + + return path +} + +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(RateLimitTestSuite)) +} + +func (suite *RateLimitTestSuite) TestReceiveIBCToken() { + var ( + transferAmount = sdk.NewInt(1000000000) + // when transfer via sdk transfer from A (module) -> B (contract) + ibcDenom = "ibc/C053D637CCA2A2BA030E2C5EE1B28A16F71CCB0E45E8BE52766DC1B241B77878" + nativeDenom = "ppica" + nativeTokenSendOnChainA = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + nativeTokenReceiveOnChainB = sdk.NewCoin(nativeDenom, transferAmount) + timeoutHeight = clienttypes.NewHeight(1, 110) + expChainABalanceDiff = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + ) + + 
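+	// Rough shape of this test: stake sent from chain A is minted as ppica on chain B
+	// through the transfer middleware; a 5% recv rate limit is then registered on
+	// chain B; a follow-up transfer sized at 5% of the original amount is expected to
+	// arrive, while repeating that same transfer is expected to be denied, leaving
+	// chain B balances unchanged.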
suite.SetupTest() // reset + + path := NewTransferPath(suite.chainA, suite.chainB) + suite.coordinator.Setup(path) + + // Add parachain token info + chainBtransMiddlewareKeeper := suite.chainB.TransferMiddleware() + err := chainBtransMiddlewareKeeper.AddParachainIBCInfo(suite.chainB.GetContext(), ibcDenom, path.EndpointB.ChannelID, nativeDenom, sdk.DefaultBondDenom) + suite.Require().NoError(err) + + originalChainABalance := suite.chainA.AllBalances(suite.chainA.SenderAccount.GetAddress()) + originalChainBBalance := suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + + msg := transfertypes.NewMsgTransfer(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nativeTokenSendOnChainA, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + _, err = suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, path.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPackets(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and source chain balance was decreased + newChainABalance := suite.chainA.AllBalances(suite.chainA.SenderAccount.GetAddress()) + suite.Require().Equal(originalChainABalance.Sub(expChainABalanceDiff), newChainABalance) + + // and dest chain balance contains voucher + expBalance := originalChainBBalance.Add(nativeTokenReceiveOnChainB) + gotBalance := suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + + suite.Require().Equal(expBalance, gotBalance) + + // add rate limit + chainBRateLimitKeeper := suite.chainB.RateLimit() + msgAddRateLimit := ratelimittypes.MsgAddRateLimit{ + Denom: nativeDenom, + ChannelId: path.EndpointB.ChannelID, + MaxPercentSend: sdk.NewInt(5), + MaxPercentRecv: sdk.NewInt(5), + DurationHours: 1, + } + err = chainBRateLimitKeeper.AddRateLimit(suite.chainB.GetContext(), &msgAddRateLimit) + suite.Require().NoError(err) + + // send from A to B + transferAmount = transferAmount.Mul(sdk.NewInt(5)).Quo(sdk.NewInt(100)) + nativeTokenSendOnChainA = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + msg = transfertypes.NewMsgTransfer(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nativeTokenSendOnChainA, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + _, err = suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, path.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPackets(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + expBalance = expBalance.Add(sdk.NewCoin(nativeDenom, transferAmount)) + gotBalance = suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + suite.Require().Equal(expBalance, gotBalance) + + // send 1 more time + _, err = suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) 
+ suite.Require().NoError(err, path.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPackets(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // not receive token because catch the threshold => balances have no change + gotBalance = suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + suite.Require().Equal(expBalance, gotBalance) +} + +func (suite *RateLimitTestSuite) TestSendIBCToken() { + var ( + transferAmount = sdk.NewInt(1000000000) + // when transfer via sdk transfer from A (module) -> B (contract) + ibcDenom = "ibc/C053D637CCA2A2BA030E2C5EE1B28A16F71CCB0E45E8BE52766DC1B241B77878" + nativeDenom = "ppica" + nativeTokenSendOnChainA = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + nativeTokenReceiveOnChainB = sdk.NewCoin(nativeDenom, transferAmount) + timeoutHeight = clienttypes.NewHeight(1, 110) + expChainABalanceDiff = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + ) + + suite.SetupTest() // reset + + path := NewTransferPath(suite.chainA, suite.chainB) + suite.coordinator.Setup(path) + + // Add parachain token info + chainBtransMiddlewareKeeper := suite.chainB.TransferMiddleware() + err := chainBtransMiddlewareKeeper.AddParachainIBCInfo(suite.chainB.GetContext(), ibcDenom, path.EndpointB.ChannelID, nativeDenom, sdk.DefaultBondDenom) + suite.Require().NoError(err) + + originalChainABalance := suite.chainA.AllBalances(suite.chainA.SenderAccount.GetAddress()) + originalChainBBalance := suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + + msg := transfertypes.NewMsgTransfer(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nativeTokenSendOnChainA, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + _, err = suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, path.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPackets(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and source chain balance was decreased + newChainABalance := suite.chainA.AllBalances(suite.chainA.SenderAccount.GetAddress()) + suite.Require().Equal(originalChainABalance.Sub(expChainABalanceDiff), newChainABalance) + + // and dest chain balance contains voucher + expBalance := originalChainBBalance.Add(nativeTokenReceiveOnChainB) + gotBalance := suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + + suite.Require().Equal(expBalance, gotBalance) + + originalChainBBalance = gotBalance + // add rate limit + chainBRateLimitKeeper := suite.chainB.RateLimit() + msgAddRateLimit := ratelimittypes.MsgAddRateLimit{ + Denom: nativeDenom, + ChannelId: path.EndpointB.ChannelID, + MaxPercentSend: sdk.NewInt(5), + MaxPercentRecv: sdk.NewInt(5), + DurationHours: 1, + } + err = chainBRateLimitKeeper.AddRateLimit(suite.chainB.GetContext(), &msgAddRateLimit) + 
suite.Require().NoError(err) + + // send from B to A + transferAmount = transferAmount.Mul(sdk.NewInt(5)).Quo(sdk.NewInt(100)) + nativeTokenSendOnChainB := sdk.NewCoin(nativeDenom, transferAmount) + msg = transfertypes.NewMsgTransfer(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, nativeTokenSendOnChainB, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + _, err = suite.chainB.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, path.EndpointA.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainB.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPacketsReverse(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + expBalance = originalChainBBalance.Sub(nativeTokenSendOnChainB) + gotBalance = suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + suite.Require().Equal(expBalance, gotBalance) + + // send 1 more time + _, err = suite.chainB.SendMsgsWithExpPass(false, msg) + suite.Require().Error(err) // catch the threshold so should not be sent + + // SignAndDeliver calls app.Commit() + suite.chainB.NextBlock() + + // increment sequence for successful transaction execution + err = suite.chainB.SenderAccount.SetSequence(suite.chainB.SenderAccount.GetSequence() + 1) + suite.Require().NoError(err) + + suite.chainB.Coordinator.IncrementTime() + + // not receive token because catch the threshold => balances have no change + balances := suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + suite.Require().Equal(expBalance, balances) +} diff --git a/x/ratelimit/types/codec.go b/x/ratelimit/types/codec.go new file mode 100644 index 000000000..9902cbe95 --- /dev/null +++ b/x/ratelimit/types/codec.go @@ -0,0 +1,51 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/legacy" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/types/msgservice" + authzcodec "github.com/cosmos/cosmos-sdk/x/authz/codec" + govcodec "github.com/cosmos/cosmos-sdk/x/gov/codec" + groupcodec "github.com/cosmos/cosmos-sdk/x/group/codec" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// RegisterLegacyAminoCodec registers the account interfaces and concrete types on the +// provided LegacyAmino codec. 
These types are used for Amino JSON serialization
+func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) {
+	legacy.RegisterAminoMsg(cdc, &MsgAddRateLimit{}, "centauri/MsgAddRateLimit")
+	legacy.RegisterAminoMsg(cdc, &MsgUpdateRateLimit{}, "centauri/MsgUpdateRateLimit")
+	legacy.RegisterAminoMsg(cdc, &MsgRemoveRateLimit{}, "centauri/MsgRemoveRateLimit")
+	legacy.RegisterAminoMsg(cdc, &MsgResetRateLimit{}, "centauri/MsgResetRateLimit")
+}
+
+func RegisterInterfaces(registry codectypes.InterfaceRegistry) {
+	registry.RegisterImplementations(
+		(*sdk.Msg)(nil),
+		&MsgAddRateLimit{},
+		&MsgUpdateRateLimit{},
+		&MsgRemoveRateLimit{},
+		&MsgResetRateLimit{},
+	)
+	msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc)
+}
+
+var (
+	amino     = codec.NewLegacyAmino()
+	ModuleCdc = codec.NewAminoCodec(amino)
+)
+
+func init() {
+	RegisterLegacyAminoCodec(amino)
+	cryptocodec.RegisterCrypto(amino)
+	sdk.RegisterLegacyAminoCodec(amino)
+
+	// Register all Amino interfaces and concrete types on the authz and gov Amino codec so that this can later be
+	// used to properly serialize MsgGrant, MsgExec and MsgSubmitProposal instances
+	RegisterLegacyAminoCodec(authzcodec.Amino)
+	RegisterLegacyAminoCodec(govcodec.Amino)
+	RegisterLegacyAminoCodec(groupcodec.Amino)
+}
diff --git a/x/ratelimit/types/epoch.go b/x/ratelimit/types/epoch.go
new file mode 100644
index 000000000..6ad9be68b
--- /dev/null
+++ b/x/ratelimit/types/epoch.go
@@ -0,0 +1,40 @@
+package types
+
+import (
+	"errors"
+	"time"
+)
+
+var (
+	DAY_EPOCH                     = "hour"
+	EpochHourPeriod time.Duration = time.Hour * 24
+)
+
+// Validate validates the epoch info.
+func (epoch EpochInfo) Validate() error {
+	if epoch.Identifier == "" {
+		return errors.New("epoch identifier should NOT be empty")
+	}
+	if epoch.Duration == 0 {
+		return errors.New("epoch duration should NOT be 0")
+	}
+	if epoch.CurrentEpoch < 0 {
+		return errors.New("epoch CurrentEpoch must be non-negative")
+	}
+	if epoch.CurrentEpochStartHeight < 0 {
+		return errors.New("epoch CurrentEpochStartHeight must be non-negative")
+	}
+	return nil
+}
+
+func NewGenesisEpochInfo(identifier string, duration time.Duration) EpochInfo {
+	return EpochInfo{
+		Identifier:              identifier,
+		StartTime:               time.Time{},
+		Duration:                duration,
+		CurrentEpoch:            0,
+		CurrentEpochStartHeight: 0,
+		CurrentEpochStartTime:   time.Time{},
+		EpochCountingStarted:    false,
+	}
+}
diff --git a/x/ratelimit/types/epoch.pb.go b/x/ratelimit/types/epoch.pb.go
new file mode 100644
index 000000000..78b7d7016
--- /dev/null
+++ b/x/ratelimit/types/epoch.pb.go
@@ -0,0 +1,636 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: centauri/ratelimit/v1beta1/epoch.proto
+
+package types
+
+import (
+	fmt "fmt"
+	_ "github.com/cosmos/gogoproto/gogoproto"
+	proto "github.com/cosmos/gogoproto/proto"
+	github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types"
+	_ "google.golang.org/protobuf/types/known/durationpb"
+	_ "google.golang.org/protobuf/types/known/timestamppb"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+	time "time"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+var _ = time.Kitchen
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type EpochInfo struct { + // identifier is a unique reference to this particular timer. + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // start_time is the time at which the timer first ever ticks. + // If start_time is in the future, the epoch will not begin until the start + // time. + StartTime time.Time `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3,stdtime" json:"start_time" yaml:"start_time"` + // duration is the time in between epoch ticks. + // In order for intended behavior to be met, duration should + // be greater than the chains expected block time. + // Duration must be non-zero. + Duration time.Duration `protobuf:"bytes,3,opt,name=duration,proto3,stdduration" json:"duration,omitempty" yaml:"duration"` + // current_epoch is the current epoch number, or in other words, + // how many times has the timer 'ticked'. + // The first tick (current_epoch=1) is defined as + // the first block whose blocktime is greater than the EpochInfo start_time. + CurrentEpoch int64 `protobuf:"varint,4,opt,name=current_epoch,json=currentEpoch,proto3" json:"current_epoch,omitempty"` + // current_epoch_start_time describes the start time of the current timer + // interval. The interval is (current_epoch_start_time, + // current_epoch_start_time + duration] When the timer ticks, this is set to + // current_epoch_start_time = last_epoch_start_time + duration only one timer + // tick for a given identifier can occur per block. + // + // NOTE! The current_epoch_start_time may diverge significantly from the + // wall-clock time the epoch began at. Wall-clock time of epoch start may be + // >> current_epoch_start_time. Suppose current_epoch_start_time = 10, + // duration = 5. Suppose the chain goes offline at t=14, and comes back online + // at t=30, and produces blocks at every successive time. (t=31, 32, etc.) + // * The t=30 block will start the epoch for (10, 15] + // * The t=31 block will start the epoch for (15, 20] + // * The t=32 block will start the epoch for (20, 25] + // * The t=33 block will start the epoch for (25, 30] + // * The t=34 block will start the epoch for (30, 35] + // * The **t=36** block will start the epoch for (35, 40] + CurrentEpochStartTime time.Time `protobuf:"bytes,5,opt,name=current_epoch_start_time,json=currentEpochStartTime,proto3,stdtime" json:"current_epoch_start_time" yaml:"current_epoch_start_time"` + // epoch_counting_started is a boolean, that indicates whether this + // epoch timer has began yet. + EpochCountingStarted bool `protobuf:"varint,6,opt,name=epoch_counting_started,json=epochCountingStarted,proto3" json:"epoch_counting_started,omitempty"` + // current_epoch_start_height is the block height at which the current epoch + // started. 
(The block height at which the timer last ticked) + CurrentEpochStartHeight int64 `protobuf:"varint,8,opt,name=current_epoch_start_height,json=currentEpochStartHeight,proto3" json:"current_epoch_start_height,omitempty"` +} + +func (m *EpochInfo) Reset() { *m = EpochInfo{} } +func (m *EpochInfo) String() string { return proto.CompactTextString(m) } +func (*EpochInfo) ProtoMessage() {} +func (*EpochInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2d9490a2e433bc10, []int{0} +} +func (m *EpochInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EpochInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EpochInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EpochInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_EpochInfo.Merge(m, src) +} +func (m *EpochInfo) XXX_Size() int { + return m.Size() +} +func (m *EpochInfo) XXX_DiscardUnknown() { + xxx_messageInfo_EpochInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_EpochInfo proto.InternalMessageInfo + +func (m *EpochInfo) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +func (m *EpochInfo) GetStartTime() time.Time { + if m != nil { + return m.StartTime + } + return time.Time{} +} + +func (m *EpochInfo) GetDuration() time.Duration { + if m != nil { + return m.Duration + } + return 0 +} + +func (m *EpochInfo) GetCurrentEpoch() int64 { + if m != nil { + return m.CurrentEpoch + } + return 0 +} + +func (m *EpochInfo) GetCurrentEpochStartTime() time.Time { + if m != nil { + return m.CurrentEpochStartTime + } + return time.Time{} +} + +func (m *EpochInfo) GetEpochCountingStarted() bool { + if m != nil { + return m.EpochCountingStarted + } + return false +} + +func (m *EpochInfo) GetCurrentEpochStartHeight() int64 { + if m != nil { + return m.CurrentEpochStartHeight + } + return 0 +} + +func init() { + proto.RegisterType((*EpochInfo)(nil), "centauri.ratelimit.v1beta1.EpochInfo") +} + +func init() { + proto.RegisterFile("centauri/ratelimit/v1beta1/epoch.proto", fileDescriptor_2d9490a2e433bc10) +} + +var fileDescriptor_2d9490a2e433bc10 = []byte{ + // 425 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x41, 0x8f, 0x93, 0x40, + 0x14, 0xc7, 0x19, 0xb7, 0xae, 0x74, 0xd4, 0xe8, 0x4e, 0x56, 0x45, 0x12, 0x07, 0x82, 0x89, 0x21, + 0x59, 0x03, 0xa9, 0x7a, 0xd2, 0x5b, 0xd5, 0x44, 0x3d, 0xb2, 0x1e, 0x8c, 0x97, 0x86, 0xd2, 0x29, + 0x4c, 0x52, 0x18, 0x32, 0x7d, 0x18, 0x7b, 0xf3, 0x23, 0xf4, 0xe8, 0x47, 0xda, 0xe3, 0x1e, 0x3d, + 0xa1, 0x69, 0x6f, 0x1e, 0xf7, 0x13, 0x18, 0x66, 0xa0, 0xa2, 0xd5, 0xec, 0x0d, 0xde, 0xff, 0xf7, + 0xfe, 0xff, 0xf7, 0x5e, 0x06, 0x3f, 0x4a, 0x58, 0x01, 0x71, 0x25, 0x79, 0x28, 0x63, 0x60, 0x0b, + 0x9e, 0x73, 0x08, 0x3f, 0x8d, 0xa6, 0x0c, 0xe2, 0x51, 0xc8, 0x4a, 0x91, 0x64, 0x41, 0x29, 0x05, + 0x08, 0x62, 0x77, 0x5c, 0xb0, 0xe3, 0x82, 0x96, 0xb3, 0x8f, 0x53, 0x91, 0x0a, 0x85, 0x85, 0xcd, + 0x97, 0xee, 0xb0, 0x69, 0x2a, 0x44, 0xba, 0x60, 0xa1, 0xfa, 0x9b, 0x56, 0xf3, 0x70, 0x56, 0xc9, + 0x18, 0xb8, 0x28, 0x5a, 0xdd, 0xf9, 0x5b, 0x07, 0x9e, 0xb3, 0x25, 0xc4, 0x79, 0xa9, 0x01, 0x6f, + 0x3d, 0xc0, 0xc3, 0xd7, 0xcd, 0x08, 0x6f, 0x8b, 0xb9, 0x20, 0x14, 0x63, 0x3e, 0x63, 0x05, 0xf0, + 0x39, 0x67, 0xd2, 0x42, 0x2e, 0xf2, 0x87, 0x51, 0xaf, 0x42, 0x3e, 0x60, 0xbc, 0x84, 0x58, 0xc2, + 0xa4, 0xb1, 0xb1, 0xae, 0xb8, 0xc8, 0xbf, 0xfe, 0xc4, 
0x0e, 0x74, 0x46, 0xd0, 0x65, 0x04, 0xef, + 0xbb, 0x8c, 0xf1, 0x83, 0xb3, 0xda, 0x31, 0x2e, 0x6a, 0xe7, 0x68, 0x15, 0xe7, 0x8b, 0xe7, 0xde, + 0xef, 0x5e, 0x6f, 0xfd, 0xdd, 0x41, 0xd1, 0x50, 0x15, 0x1a, 0x9c, 0x64, 0xd8, 0xec, 0x46, 0xb7, + 0x0e, 0x94, 0xef, 0xfd, 0x3d, 0xdf, 0x57, 0x2d, 0x30, 0x1e, 0x35, 0xb6, 0x3f, 0x6b, 0x87, 0x74, + 0x2d, 0x8f, 0x45, 0xce, 0x81, 0xe5, 0x25, 0xac, 0x2e, 0x6a, 0xe7, 0x96, 0x0e, 0xeb, 0x34, 0xef, + 0x6b, 0x13, 0xb5, 0x73, 0x27, 0x0f, 0xf1, 0xcd, 0xa4, 0x92, 0x92, 0x15, 0x30, 0x51, 0xb7, 0xb7, + 0x06, 0x2e, 0xf2, 0x0f, 0xa2, 0x1b, 0x6d, 0x51, 0x1d, 0x83, 0x7c, 0x41, 0xd8, 0xfa, 0x83, 0x9a, + 0xf4, 0xf6, 0xbe, 0x7a, 0xe9, 0xde, 0x27, 0xed, 0xde, 0x8e, 0x1e, 0xe5, 0x7f, 0x4e, 0xfa, 0x0a, + 0x77, 0xfa, 0xc9, 0xa7, 0xbb, 0x8b, 0x3c, 0xc3, 0x77, 0x35, 0x9f, 0x88, 0xaa, 0x00, 0x5e, 0xa4, + 0xba, 0x91, 0xcd, 0xac, 0x43, 0x17, 0xf9, 0x66, 0x74, 0xac, 0xd4, 0x97, 0xad, 0x78, 0xaa, 0x35, + 0xf2, 0x02, 0xdb, 0xff, 0x4a, 0xcb, 0x18, 0x4f, 0x33, 0xb0, 0x4c, 0xb5, 0xea, 0xbd, 0xbd, 0xc0, + 0x37, 0x4a, 0x7e, 0x37, 0x30, 0xaf, 0xdd, 0x36, 0xc7, 0x27, 0x67, 0x1b, 0x8a, 0xce, 0x37, 0x14, + 0xfd, 0xd8, 0x50, 0xb4, 0xde, 0x52, 0xe3, 0x7c, 0x4b, 0x8d, 0x6f, 0x5b, 0x6a, 0x7c, 0x3c, 0xfa, + 0xdc, 0x7b, 0xc0, 0xb0, 0x2a, 0xd9, 0x72, 0x7a, 0xa8, 0xb6, 0x7f, 0xfa, 0x2b, 0x00, 0x00, 0xff, + 0xff, 0xe8, 0xa1, 0x47, 0xfe, 0xe3, 0x02, 0x00, 0x00, +} + +func (m *EpochInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EpochInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EpochInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CurrentEpochStartHeight != 0 { + i = encodeVarintEpoch(dAtA, i, uint64(m.CurrentEpochStartHeight)) + i-- + dAtA[i] = 0x40 + } + if m.EpochCountingStarted { + i-- + if m.EpochCountingStarted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + n1, err1 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.CurrentEpochStartTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.CurrentEpochStartTime):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintEpoch(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x2a + if m.CurrentEpoch != 0 { + i = encodeVarintEpoch(dAtA, i, uint64(m.CurrentEpoch)) + i-- + dAtA[i] = 0x20 + } + n2, err2 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.Duration, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintEpoch(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x1a + n3, err3 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.StartTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.StartTime):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintEpoch(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x12 + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = encodeVarintEpoch(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintEpoch(dAtA []byte, offset int, v uint64) int { + offset -= sovEpoch(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EpochInfo) Size() (n int) { + if m == nil { + return 0 + } 
+ var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovEpoch(uint64(l)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.StartTime) + n += 1 + l + sovEpoch(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration) + n += 1 + l + sovEpoch(uint64(l)) + if m.CurrentEpoch != 0 { + n += 1 + sovEpoch(uint64(m.CurrentEpoch)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.CurrentEpochStartTime) + n += 1 + l + sovEpoch(uint64(l)) + if m.EpochCountingStarted { + n += 2 + } + if m.CurrentEpochStartHeight != 0 { + n += 1 + sovEpoch(uint64(m.CurrentEpochStartHeight)) + } + return n +} + +func sovEpoch(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEpoch(x uint64) (n int) { + return sovEpoch(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EpochInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EpochInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EpochInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEpoch + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEpoch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEpoch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEpoch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.StartTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEpoch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEpoch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdDurationUnmarshal(&m.Duration, 
dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpoch", wireType) + } + m.CurrentEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentEpoch |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpochStartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEpoch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEpoch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.CurrentEpochStartTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochCountingStarted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EpochCountingStarted = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpochStartHeight", wireType) + } + m.CurrentEpochStartHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentEpochStartHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEpoch(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEpoch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEpoch(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEpoch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEpoch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEpoch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEpoch + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEpoch + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType 
%d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEpoch + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEpoch = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEpoch = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEpoch = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/errors.go b/x/ratelimit/types/errors.go new file mode 100644 index 000000000..c362de065 --- /dev/null +++ b/x/ratelimit/types/errors.go @@ -0,0 +1,16 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" +) + +// x/ratelimit module sentinel errors +var ( + ErrRateLimitAlreadyExists = errorsmod.Register(ModuleName, 1, "ratelimit key duplicated") + ErrRateLimitNotFound = errorsmod.Register(ModuleName, 2, "rate limit not found") + ErrZeroChannelValue = errorsmod.Register(ModuleName, 3, "channel value is zero") + ErrQuotaExceeded = errorsmod.Register(ModuleName, 4, "quota exceeded") + ErrInvalidClientState = errorsmod.Register(ModuleName, 5, "unable to determine client state from channelId") + ErrChannelNotFound = errorsmod.Register(ModuleName, 6, "channel does not exist") + ErrDenomIsBlacklisted = errorsmod.Register(ModuleName, 7, "denom is blacklisted") +) diff --git a/x/ratelimit/types/events.go b/x/ratelimit/types/events.go new file mode 100644 index 000000000..a63cde3c2 --- /dev/null +++ b/x/ratelimit/types/events.go @@ -0,0 +1,20 @@ +package types + +var ( + EventTransferDenied = "transfer_denied" + + EventRateLimitExceeded = "rate_limit_exceeded" + + AttributeKeyReason = "reason" + AttributeKeyModule = "module" + AttributeKeyAction = "action" + AttributeKeyDenom = "denom" + AttributeKeyChannel = "channel" + AttributeKeyAmount = "amount" + AttributeKeyError = "error" + + EventTypeEpochEnd = "epoch_end" // TODO: need to clean up (not use) + EventTypeEpochStart = "epoch_start" + AttributeEpochNumber = "epoch_number" + AttributeEpochStartTime = "start_time" +) diff --git a/x/ratelimit/types/expected_keepers.go b/x/ratelimit/types/expected_keepers.go new file mode 100644 index 000000000..e1f5bd29a --- /dev/null +++ b/x/ratelimit/types/expected_keepers.go @@ -0,0 +1,20 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + "github.com/cosmos/ibc-go/v7/modules/core/exported" +) + +// BankKeeper defines the banking contract that must be fulfilled when +// creating a x/ratelimit keeper. +type BankKeeper interface { + GetSupply(ctx sdk.Context, denom string) sdk.Coin +} + +// ChannelKeeper defines the channel contract that must be fulfilled when +// creating a x/ratelimit keeper. 
+type ChannelKeeper interface { + GetChannel(ctx sdk.Context, portID string, channelID string) (channeltypes.Channel, bool) + GetChannelClientState(ctx sdk.Context, portID string, channelID string) (string, exported.ClientState, error) +} diff --git a/x/ratelimit/types/flow.go b/x/ratelimit/types/flow.go new file mode 100644 index 000000000..dce7192ab --- /dev/null +++ b/x/ratelimit/types/flow.go @@ -0,0 +1,47 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/math" +) + +// Initializes a new flow from the channel value +func NewFlow(channelValue math.Int) Flow { + flow := Flow{ + ChannelValue: channelValue, + Inflow: math.ZeroInt(), + Outflow: math.ZeroInt(), + } + + return flow +} + +// Adds an amount to the rate limit's flow after an incoming packet was received +// Returns an error if the new inflow will cause the rate limit to exceed its quota +func (f *Flow) AddInflow(amount math.Int, quota Quota) error { + netInflow := f.Inflow.Sub(f.Outflow).Add(amount) + + if quota.CheckExceedsQuota(PACKET_RECV, netInflow, f.ChannelValue) { + return errorsmod.Wrapf(ErrQuotaExceeded, + "Inflow exceeds quota - Net Inflow: %v, Channel Value: %v, Threshold: %v%%", + netInflow, f.ChannelValue, quota.MaxPercentRecv) + } + + f.Inflow = f.Inflow.Add(amount) + return nil +} + +// Adds an amount to the rate limit's flow after a packet was sent +// Returns an error if the new outflow will cause the rate limit to exceed its quota +func (f *Flow) AddOutflow(amount math.Int, quota Quota) error { + netOutflow := f.Outflow.Sub(f.Inflow).Add(amount) + + if quota.CheckExceedsQuota(PACKET_SEND, netOutflow, f.ChannelValue) { + return errorsmod.Wrapf(ErrQuotaExceeded, + "Outflow exceeds quota - Net Outflow: %v, Channel Value: %v, Threshold: %v%%", + netOutflow, f.ChannelValue, quota.MaxPercentSend) + } + + f.Outflow = f.Outflow.Add(amount) + return nil +} diff --git a/x/ratelimit/types/genesis.go b/x/ratelimit/types/genesis.go new file mode 100644 index 000000000..ca95a8d53 --- /dev/null +++ b/x/ratelimit/types/genesis.go @@ -0,0 +1,16 @@ +package types + +// DefaultGenesis returns the default Capability genesis state +func DefaultGenesisState() *GenesisState { + return &GenesisState{ + Params: DefaultParams(), + RateLimits: []RateLimit{}, + Epochs: []EpochInfo{NewGenesisEpochInfo(DAY_EPOCH, EpochHourPeriod)}, + } +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func ValidateGenesis(data GenesisState) error { + return data.Params.Validate() +} diff --git a/x/ratelimit/types/genesis.pb.go b/x/ratelimit/types/genesis.pb.go new file mode 100644 index 000000000..23b08cf9a --- /dev/null +++ b/x/ratelimit/types/genesis.pb.go @@ -0,0 +1,577 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the ratelimit module's genesis state. 
+type GenesisState struct { + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params" yaml:"params"` + RateLimits []RateLimit `protobuf:"bytes,2,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits" yaml:"rate_limits"` + WhitelistedAddressPairs []WhitelistedAddressPair `protobuf:"bytes,3,rep,name=whitelisted_address_pairs,json=whitelistedAddressPairs,proto3" json:"whitelisted_address_pairs" yaml:"whitelisted_address_pairs"` + PendingSendPacketSequenceNumbers []string `protobuf:"bytes,4,rep,name=pending_send_packet_sequence_numbers,json=pendingSendPacketSequenceNumbers,proto3" json:"pending_send_packet_sequence_numbers,omitempty"` + Epochs []EpochInfo `protobuf:"bytes,5,rep,name=epochs,proto3" json:"epochs"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_9b6ba3f85e177adf, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func (m *GenesisState) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +func (m *GenesisState) GetWhitelistedAddressPairs() []WhitelistedAddressPair { + if m != nil { + return m.WhitelistedAddressPairs + } + return nil +} + +func (m *GenesisState) GetPendingSendPacketSequenceNumbers() []string { + if m != nil { + return m.PendingSendPacketSequenceNumbers + } + return nil +} + +func (m *GenesisState) GetEpochs() []EpochInfo { + if m != nil { + return m.Epochs + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "centauri.ratelimit.v1beta1.GenesisState") +} + +func init() { + proto.RegisterFile("centauri/ratelimit/v1beta1/genesis.proto", fileDescriptor_9b6ba3f85e177adf) +} + +var fileDescriptor_9b6ba3f85e177adf = []byte{ + // 401 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x3f, 0xeb, 0xd3, 0x40, + 0x18, 0xc7, 0x13, 0x53, 0x0b, 0x5e, 0x75, 0x30, 0x28, 0xc6, 0x0c, 0x69, 0x08, 0xfe, 0x09, 0x0a, + 0x09, 0xad, 0x9b, 0x9b, 0x11, 0x11, 0x41, 0x4a, 0x4d, 0x07, 0xc1, 0x25, 0x5c, 0x92, 0xc7, 0xf4, + 0xb0, 0xb9, 0xc4, 0xbb, 0xab, 0xb5, 0xef, 0xc2, 0xd1, 0x97, 0xd4, 0xb1, 0xa3, 0x53, 0x91, 0xf6, + 0x1d, 0x08, 0xee, 0x92, 0xdc, 0xd5, 0x76, 0xf8, 0x35, 0xbf, 0xed, 0x38, 0x3e, 0xdf, 0xcf, 0x97, + 0xe7, 0xee, 0x41, 0x7e, 0x06, 0x54, 0xe0, 0x25, 0x23, 0x21, 0xc3, 0x02, 0x16, 0xa4, 0x24, 0x22, + 0xfc, 0x36, 0x4a, 0x41, 0xe0, 0x51, 0x58, 0x00, 0x05, 0x4e, 0x78, 0x50, 0xb3, 0x4a, 0x54, 0xa6, + 0x7d, 0x24, 0x83, 0xff, 0x64, 0xa0, 0x48, 0xfb, 0x5e, 0x51, 0x15, 0x55, 0x8b, 0x85, 0xcd, 0x49, + 0x26, 0xec, 0xa7, 0x1d, 0xee, 0x1a, 0x33, 0x5c, 0x2a, 0xb5, 0xfd, 0xac, 
0x03, 0x3c, 0x95, 0x49, + 0xf6, 0x49, 0x07, 0x0b, 0x75, 0x95, 0xcd, 0x25, 0xe7, 0xfd, 0x35, 0xd0, 0xed, 0xb7, 0x72, 0x80, + 0x99, 0xc0, 0x02, 0xcc, 0x0f, 0xa8, 0x2f, 0x4b, 0x2d, 0xdd, 0xd5, 0xfd, 0xc1, 0xd8, 0x0b, 0x2e, + 0x0f, 0x14, 0x4c, 0x5b, 0x32, 0xba, 0xbf, 0xd9, 0x0d, 0xb5, 0x3f, 0xbb, 0xe1, 0x9d, 0x35, 0x2e, + 0x17, 0x2f, 0x3d, 0x99, 0xf7, 0x62, 0x25, 0x32, 0x53, 0x34, 0x68, 0xa2, 0x49, 0x9b, 0xe5, 0xd6, + 0x0d, 0xd7, 0xf0, 0x07, 0xe3, 0xc7, 0x5d, 0xde, 0x18, 0x0b, 0x78, 0xdf, 0xdc, 0x44, 0xb6, 0x52, + 0x9b, 0x52, 0x7d, 0xe6, 0xf1, 0x62, 0xc4, 0x8e, 0x18, 0x37, 0x7f, 0xea, 0xe8, 0xe1, 0x6a, 0x4e, + 0x1a, 0x11, 0x17, 0x90, 0x27, 0x38, 0xcf, 0x19, 0x70, 0x9e, 0xd4, 0x98, 0x30, 0x6e, 0x19, 0x6d, + 0xe5, 0xb8, 0xab, 0xf2, 0xe3, 0x29, 0xfc, 0x4a, 0x66, 0xa7, 0x98, 0xb0, 0xc8, 0x57, 0xfd, 0xae, + 0xec, 0xbf, 0x58, 0xe1, 0xc5, 0x0f, 0x56, 0x57, 0x1a, 0xb8, 0x39, 0x41, 0x8f, 0x6a, 0xa0, 0x39, + 0xa1, 0x45, 0xc2, 0x81, 0xe6, 0x49, 0x8d, 0xb3, 0x2f, 0x20, 0x12, 0x0e, 0x5f, 0x97, 0x40, 0x33, + 0x48, 0xe8, 0xb2, 0x4c, 0x81, 0x71, 0xab, 0xe7, 0x1a, 0xfe, 0xad, 0xd8, 0x55, 0xec, 0x0c, 0x68, + 0x3e, 0x6d, 0xc9, 0x99, 0x02, 0x27, 0x92, 0x33, 0x5f, 0xa3, 0x7e, 0xfb, 0x83, 0xdc, 0xba, 0x79, + 0xfd, 0x4b, 0xbe, 0x69, 0xc8, 0x77, 0xf4, 0x73, 0x15, 0xf5, 0x9a, 0x49, 0x62, 0x15, 0x8d, 0x9e, + 0x6f, 0xf6, 0x8e, 0xbe, 0xdd, 0x3b, 0xfa, 0xef, 0xbd, 0xa3, 0xff, 0x38, 0x38, 0xda, 0xf6, 0xe0, + 0x68, 0xbf, 0x0e, 0x8e, 0xf6, 0xe9, 0xee, 0xf7, 0xb3, 0x95, 0x11, 0xeb, 0x1a, 0x78, 0xda, 0x6f, + 0x77, 0xe5, 0xc5, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf4, 0xbb, 0xa3, 0x8c, 0x06, 0x03, 0x00, + 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Epochs) > 0 { + for iNdEx := len(m.Epochs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Epochs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.PendingSendPacketSequenceNumbers) > 0 { + for iNdEx := len(m.PendingSendPacketSequenceNumbers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PendingSendPacketSequenceNumbers[iNdEx]) + copy(dAtA[i:], m.PendingSendPacketSequenceNumbers[iNdEx]) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.PendingSendPacketSequenceNumbers[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.WhitelistedAddressPairs) > 0 { + for iNdEx := len(m.WhitelistedAddressPairs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.WhitelistedAddressPairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.WhitelistedAddressPairs) > 0 { + for _, e := range m.WhitelistedAddressPairs { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.PendingSendPacketSequenceNumbers) > 0 { + for _, s := range m.PendingSendPacketSequenceNumbers { + l = len(s) + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.Epochs) > 0 { + for _, e := range m.Epochs { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhitelistedAddressPairs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhitelistedAddressPairs = append(m.WhitelistedAddressPairs, WhitelistedAddressPair{}) + if err := m.WhitelistedAddressPairs[len(m.WhitelistedAddressPairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingSendPacketSequenceNumbers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PendingSendPacketSequenceNumbers = append(m.PendingSendPacketSequenceNumbers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Epochs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Epochs = append(m.Epochs, EpochInfo{}) + if err := m.Epochs[len(m.Epochs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, 
fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/keys.go b/x/ratelimit/types/keys.go new file mode 100644 index 000000000..246dcc301 --- /dev/null +++ b/x/ratelimit/types/keys.go @@ -0,0 +1,45 @@ +package types + +import "encoding/binary" + +const ( + ModuleName = "ratelimit" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey is the message route for slashing + RouterKey = ModuleName + + // QuerierRoute defines the module's query routing key + QuerierRoute = ModuleName +) + +func KeyPrefix(p string) []byte { + return []byte(p) +} + +var ( + PathKeyPrefix = KeyPrefix("path") + RateLimitKeyPrefix = KeyPrefix("rate-limit") + PendingSendPacketPrefix = KeyPrefix("pending-send-packet") + DenomBlacklistKeyPrefix = KeyPrefix("denom-blacklist") + AddressWhitelistKeyPrefix = KeyPrefix("address-blacklist") + EpochKeyPrefix = KeyPrefix("epoch") + + PendingSendPacketChannelLength int = 16 +) + +func GetPendingSendPacketKey(channelId string, sequenceNumber uint64) []byte { + channelIdBz := make([]byte, PendingSendPacketChannelLength) + copy(channelIdBz[:], channelId) + + sequenceNumberBz := make([]byte, 8) + binary.BigEndian.PutUint64(sequenceNumberBz, sequenceNumber) + + return append(channelIdBz, sequenceNumberBz...) +} + +func GetAddressWhitelistKey(sender, receiver string) []byte { + return append(KeyPrefix(sender), KeyPrefix(receiver)...) +} diff --git a/x/ratelimit/types/msg.go b/x/ratelimit/types/msg.go new file mode 100644 index 000000000..caaaf77d6 --- /dev/null +++ b/x/ratelimit/types/msg.go @@ -0,0 +1,244 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + host "github.com/cosmos/ibc-go/v7/modules/core/24-host" +) + +const ( + TypeMsgAddRateLimit = "add_rate_limit" + TypeMsgUpdateRateLimit = "update_rate_limit" + TypeMsgRemoveRateLimit = "remove_rate_limit" + TypeMsgResetRateLimit = "reset_rate_limit" +) + +var _ sdk.Msg = &MsgAddRateLimit{} + +func NewMsgAddRateLimit( + authority string, + denom string, + channelID string, + maxPercentSend math.Int, + maxPercentRecv math.Int, + durationHours uint64, +) *MsgAddRateLimit { + return &MsgAddRateLimit{ + Authority: authority, + Denom: denom, + ChannelId: channelID, + MaxPercentSend: maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: durationHours, + } +} + +// Route Implements Msg. +func (msg MsgAddRateLimit) Route() string { return RouterKey } + +// Type Implements Msg. +func (msg MsgAddRateLimit) Type() string { return TypeMsgAddRateLimit } + +// GetSignBytes implements the LegacyMsg interface. +func (msg MsgAddRateLimit) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners returns the expected signers for a MsgAddParachainIBCTokenInfo message. +func (msg *MsgAddRateLimit) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. 
+func (msg *MsgAddRateLimit) ValidateBasic() error { + // validate authority + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errorsmod.Wrap(err, "invalid authority address") + } + + // validate channelIds + if err := host.ChannelIdentifierValidator(msg.ChannelId); err != nil { + return err + } + + if msg.MaxPercentSend.GT(math.NewInt(100)) || msg.MaxPercentSend.LT(math.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "max-percent-send percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentSend) + } + + if msg.MaxPercentRecv.GT(math.NewInt(100)) || msg.MaxPercentRecv.LT(math.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "max-percent-recv percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentRecv) + } + + if msg.MaxPercentRecv.IsZero() && msg.MaxPercentSend.IsZero() { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "either the max send or max receive threshold must be greater than 0") + } + + if msg.DurationHours == 0 { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "duration can not be zero") + } + + return nil +} + +var _ sdk.Msg = &MsgUpdateRateLimit{} + +func NewMsgUpdateRateLimit( + authority string, + denom string, + channelID string, + maxPercentSend math.Int, + maxPercentRecv math.Int, + durationHours uint64, +) *MsgUpdateRateLimit { + return &MsgUpdateRateLimit{ + Authority: authority, + Denom: denom, + ChannelId: channelID, + MaxPercentSend: maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: durationHours, + } +} + +// Route Implements Msg. +func (msg MsgUpdateRateLimit) Route() string { return RouterKey } + +// Type Implements Msg. +func (msg MsgUpdateRateLimit) Type() string { return TypeMsgUpdateRateLimit } + +// GetSignBytes implements the LegacyMsg interface. +func (msg MsgUpdateRateLimit) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners returns the expected signers for a MsgAddParachainIBCTokenInfo message. +func (msg *MsgUpdateRateLimit) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. 
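+// The same constraints as MsgAddRateLimit apply: a valid authority address, a +// well-formed channel identifier, percentages within [0, 100] with at least +// one of them non-zero, and a non-zero duration.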
+func (msg *MsgUpdateRateLimit) ValidateBasic() error { + // validate authority + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errorsmod.Wrap(err, "invalid authority address") + } + + // validate channelIds + if err := host.ChannelIdentifierValidator(msg.ChannelId); err != nil { + return err + } + + if msg.MaxPercentSend.GT(math.NewInt(100)) || msg.MaxPercentSend.LT(math.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "max-percent-send percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentSend) + } + + if msg.MaxPercentRecv.GT(math.NewInt(100)) || msg.MaxPercentRecv.LT(math.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "max-percent-recv percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentRecv) + } + + if msg.MaxPercentRecv.IsZero() && msg.MaxPercentSend.IsZero() { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "either the max send or max receive threshold must be greater than 0") + } + + if msg.DurationHours == 0 { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "duration can not be zero") + } + + return nil +} + +var _ sdk.Msg = &MsgRemoveRateLimit{} + +func NewMsgRemoveRateLimit( + authority string, + denom string, + channelID string, +) *MsgRemoveRateLimit { + return &MsgRemoveRateLimit{ + Authority: authority, + Denom: denom, + ChannelId: channelID, + } +} + +// Route Implements Msg. +func (msg MsgRemoveRateLimit) Route() string { return RouterKey } + +// Type Implements Msg. +func (msg MsgRemoveRateLimit) Type() string { return TypeMsgRemoveRateLimit } + +// GetSignBytes implements the LegacyMsg interface. +func (msg MsgRemoveRateLimit) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners returns the expected signers for a MsgAddParachainIBCTokenInfo message. +func (msg *MsgRemoveRateLimit) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (msg *MsgRemoveRateLimit) ValidateBasic() error { + // validate authority + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errorsmod.Wrap(err, "invalid authority address") + } + + // validate channelIds + if err := host.ChannelIdentifierValidator(msg.ChannelId); err != nil { + return err + } + + return nil +} + +var _ sdk.Msg = &MsgResetRateLimit{} + +func NewMsgResetRateLimit( + authority string, + denom string, + channelID string, +) *MsgResetRateLimit { + return &MsgResetRateLimit{ + Authority: authority, + Denom: denom, + ChannelId: channelID, + } +} + +// Route Implements Msg. +func (msg MsgResetRateLimit) Route() string { return RouterKey } + +// Type Implements Msg. +func (msg MsgResetRateLimit) Type() string { return TypeMsgResetRateLimit } + +// GetSignBytes implements the LegacyMsg interface. +func (msg MsgResetRateLimit) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners returns the expected signers for a MsgAddParachainIBCTokenInfo message. +func (msg *MsgResetRateLimit) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. 
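+// Only the authority address and the channel identifier are validated here.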
+func (msg *MsgResetRateLimit) ValidateBasic() error { + // validate authority + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errorsmod.Wrap(err, "invalid authority address") + } + + // validate channelIds + if err := host.ChannelIdentifierValidator(msg.ChannelId); err != nil { + return err + } + + return nil +} diff --git a/x/ratelimit/types/params.go b/x/ratelimit/types/params.go new file mode 100644 index 000000000..4f3215e35 --- /dev/null +++ b/x/ratelimit/types/params.go @@ -0,0 +1,32 @@ +package types + +import ( + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +var _ paramtypes.ParamSet = (*Params)(nil) + +// ParamKeyTable the param key table for launch module +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +// NewParams creates a new Params instance +func NewParams() Params { + return Params{} +} + +// DefaultParams returns a default set of parameters +func DefaultParams() Params { + return NewParams() +} + +// ParamSetPairs get the params.ParamSet +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{} +} + +// Validate validates the set of params +func (p Params) Validate() error { + return nil +} diff --git a/x/ratelimit/types/params.pb.go b/x/ratelimit/types/params.pb.go new file mode 100644 index 000000000..bc9bf28dc --- /dev/null +++ b/x/ratelimit/types/params.pb.go @@ -0,0 +1,268 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/params.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params holds parameters for the mint module. 
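+// The ratelimit module currently defines no parameters, so Params has no fields.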
+type Params struct { +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_1b21f4042f9a3cfb, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Params)(nil), "centauri.ratelimit.v1beta1.Params") +} + +func init() { + proto.RegisterFile("centauri/ratelimit/v1beta1/params.proto", fileDescriptor_1b21f4042f9a3cfb) +} + +var fileDescriptor_1b21f4042f9a3cfb = []byte{ + // 152 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4f, 0x4e, 0xcd, 0x2b, + 0x49, 0x2c, 0x2d, 0xca, 0xd4, 0x2f, 0x4a, 0x2c, 0x49, 0xcd, 0xc9, 0xcc, 0xcd, 0x2c, 0xd1, 0x2f, + 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x82, 0x29, 0xd4, 0x83, 0x2b, 0xd4, 0x83, 0x2a, 0x94, 0x12, 0x49, + 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd3, 0x07, 0xb1, 0x20, 0x3a, 0xa4, 0x24, 0x93, 0xf3, 0x8b, 0x73, + 0xf3, 0x8b, 0xe3, 0x21, 0x12, 0x10, 0x0e, 0x44, 0x4a, 0x89, 0x83, 0x8b, 0x2d, 0x00, 0x6c, 0xb8, + 0x93, 0xf6, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, + 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x09, 0x56, 0x20, 0x39, + 0xa9, 0xa4, 0xb2, 0x20, 0xb5, 0x38, 0x89, 0x0d, 0xac, 0xdb, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, + 0x91, 0xb5, 0x1b, 0x71, 0xb5, 0x00, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/query.pb.go b/x/ratelimit/types/query.pb.go new file mode 100644 index 000000000..9c9dcb3d8 --- /dev/null +++ b/x/ratelimit/types/query.pb.go @@ -0,0 +1,2108 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type QueryAllRateLimitsRequest struct { +} + +func (m *QueryAllRateLimitsRequest) Reset() { *m = QueryAllRateLimitsRequest{} } +func (m *QueryAllRateLimitsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllRateLimitsRequest) ProtoMessage() {} +func (*QueryAllRateLimitsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{0} +} +func (m *QueryAllRateLimitsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllRateLimitsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllRateLimitsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllRateLimitsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllRateLimitsRequest.Merge(m, src) +} +func (m *QueryAllRateLimitsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllRateLimitsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllRateLimitsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllRateLimitsRequest proto.InternalMessageInfo + +type QueryAllRateLimitsResponse struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryAllRateLimitsResponse) Reset() { *m = QueryAllRateLimitsResponse{} } +func (m *QueryAllRateLimitsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllRateLimitsResponse) ProtoMessage() {} +func (*QueryAllRateLimitsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{1} +} +func (m *QueryAllRateLimitsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllRateLimitsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllRateLimitsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllRateLimitsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllRateLimitsResponse.Merge(m, src) +} +func (m *QueryAllRateLimitsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllRateLimitsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllRateLimitsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllRateLimitsResponse proto.InternalMessageInfo + +func (m *QueryAllRateLimitsResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +type QueryRateLimitRequest struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *QueryRateLimitRequest) Reset() { *m = QueryRateLimitRequest{} } +func (m *QueryRateLimitRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitRequest) ProtoMessage() {} +func (*QueryRateLimitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{2} +} +func (m *QueryRateLimitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + 
return xxx_messageInfo_QueryRateLimitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitRequest.Merge(m, src) +} +func (m *QueryRateLimitRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitRequest proto.InternalMessageInfo + +func (m *QueryRateLimitRequest) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *QueryRateLimitRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type QueryRateLimitResponse struct { + RateLimit *RateLimit `protobuf:"bytes,1,opt,name=rate_limit,json=rateLimit,proto3" json:"rate_limit,omitempty"` +} + +func (m *QueryRateLimitResponse) Reset() { *m = QueryRateLimitResponse{} } +func (m *QueryRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitResponse) ProtoMessage() {} +func (*QueryRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{3} +} +func (m *QueryRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitResponse.Merge(m, src) +} +func (m *QueryRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitResponse proto.InternalMessageInfo + +func (m *QueryRateLimitResponse) GetRateLimit() *RateLimit { + if m != nil { + return m.RateLimit + } + return nil +} + +type QueryRateLimitsByChainIdRequest struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *QueryRateLimitsByChainIdRequest) Reset() { *m = QueryRateLimitsByChainIdRequest{} } +func (m *QueryRateLimitsByChainIdRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChainIdRequest) ProtoMessage() {} +func (*QueryRateLimitsByChainIdRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{4} +} +func (m *QueryRateLimitsByChainIdRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChainIdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChainIdRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChainIdRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChainIdRequest.Merge(m, src) +} +func (m *QueryRateLimitsByChainIdRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChainIdRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_QueryRateLimitsByChainIdRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChainIdRequest proto.InternalMessageInfo + +func (m *QueryRateLimitsByChainIdRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +type QueryRateLimitsByChainIdResponse struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryRateLimitsByChainIdResponse) Reset() { *m = QueryRateLimitsByChainIdResponse{} } +func (m *QueryRateLimitsByChainIdResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChainIdResponse) ProtoMessage() {} +func (*QueryRateLimitsByChainIdResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{5} +} +func (m *QueryRateLimitsByChainIdResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChainIdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChainIdResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChainIdResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChainIdResponse.Merge(m, src) +} +func (m *QueryRateLimitsByChainIdResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChainIdResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChainIdResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChainIdResponse proto.InternalMessageInfo + +func (m *QueryRateLimitsByChainIdResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +type QueryRateLimitsByChannelIdRequest struct { + ChannelId string `protobuf:"bytes,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *QueryRateLimitsByChannelIdRequest) Reset() { *m = QueryRateLimitsByChannelIdRequest{} } +func (m *QueryRateLimitsByChannelIdRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChannelIdRequest) ProtoMessage() {} +func (*QueryRateLimitsByChannelIdRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{6} +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChannelIdRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChannelIdRequest.Merge(m, src) +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChannelIdRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChannelIdRequest proto.InternalMessageInfo + +func (m *QueryRateLimitsByChannelIdRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type QueryRateLimitsByChannelIdResponse struct { + RateLimits []RateLimit 
`protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryRateLimitsByChannelIdResponse) Reset() { *m = QueryRateLimitsByChannelIdResponse{} } +func (m *QueryRateLimitsByChannelIdResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChannelIdResponse) ProtoMessage() {} +func (*QueryRateLimitsByChannelIdResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{7} +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChannelIdResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChannelIdResponse.Merge(m, src) +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChannelIdResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChannelIdResponse proto.InternalMessageInfo + +func (m *QueryRateLimitsByChannelIdResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +type QueryAllWhitelistedAddressesRequest struct { +} + +func (m *QueryAllWhitelistedAddressesRequest) Reset() { *m = QueryAllWhitelistedAddressesRequest{} } +func (m *QueryAllWhitelistedAddressesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllWhitelistedAddressesRequest) ProtoMessage() {} +func (*QueryAllWhitelistedAddressesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{8} +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllWhitelistedAddressesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllWhitelistedAddressesRequest.Merge(m, src) +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllWhitelistedAddressesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllWhitelistedAddressesRequest proto.InternalMessageInfo + +type QueryAllWhitelistedAddressesResponse struct { + AddressPairs []WhitelistedAddressPair `protobuf:"bytes,1,rep,name=address_pairs,json=addressPairs,proto3" json:"address_pairs"` +} + +func (m *QueryAllWhitelistedAddressesResponse) Reset() { *m = QueryAllWhitelistedAddressesResponse{} } +func (m *QueryAllWhitelistedAddressesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllWhitelistedAddressesResponse) ProtoMessage() {} +func (*QueryAllWhitelistedAddressesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{9} +} +func (m *QueryAllWhitelistedAddressesResponse) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllWhitelistedAddressesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllWhitelistedAddressesResponse.Merge(m, src) +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllWhitelistedAddressesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllWhitelistedAddressesResponse proto.InternalMessageInfo + +func (m *QueryAllWhitelistedAddressesResponse) GetAddressPairs() []WhitelistedAddressPair { + if m != nil { + return m.AddressPairs + } + return nil +} + +func init() { + proto.RegisterType((*QueryAllRateLimitsRequest)(nil), "centauri.ratelimit.v1beta1.QueryAllRateLimitsRequest") + proto.RegisterType((*QueryAllRateLimitsResponse)(nil), "centauri.ratelimit.v1beta1.QueryAllRateLimitsResponse") + proto.RegisterType((*QueryRateLimitRequest)(nil), "centauri.ratelimit.v1beta1.QueryRateLimitRequest") + proto.RegisterType((*QueryRateLimitResponse)(nil), "centauri.ratelimit.v1beta1.QueryRateLimitResponse") + proto.RegisterType((*QueryRateLimitsByChainIdRequest)(nil), "centauri.ratelimit.v1beta1.QueryRateLimitsByChainIdRequest") + proto.RegisterType((*QueryRateLimitsByChainIdResponse)(nil), "centauri.ratelimit.v1beta1.QueryRateLimitsByChainIdResponse") + proto.RegisterType((*QueryRateLimitsByChannelIdRequest)(nil), "centauri.ratelimit.v1beta1.QueryRateLimitsByChannelIdRequest") + proto.RegisterType((*QueryRateLimitsByChannelIdResponse)(nil), "centauri.ratelimit.v1beta1.QueryRateLimitsByChannelIdResponse") + proto.RegisterType((*QueryAllWhitelistedAddressesRequest)(nil), "centauri.ratelimit.v1beta1.QueryAllWhitelistedAddressesRequest") + proto.RegisterType((*QueryAllWhitelistedAddressesResponse)(nil), "centauri.ratelimit.v1beta1.QueryAllWhitelistedAddressesResponse") +} + +func init() { + proto.RegisterFile("centauri/ratelimit/v1beta1/query.proto", fileDescriptor_b0fc123b957ea496) +} + +var fileDescriptor_b0fc123b957ea496 = []byte{ + // 612 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0x4f, 0x6f, 0x12, 0x41, + 0x18, 0xc6, 0x99, 0x6a, 0xad, 0xbc, 0xb5, 0x07, 0xc7, 0x56, 0xe9, 0xaa, 0x5b, 0x5c, 0x6d, 0x53, + 0x25, 0xb2, 0x01, 0x52, 0x2f, 0xad, 0x7f, 0x8a, 0x5e, 0x9a, 0x70, 0x50, 0x2e, 0x26, 0x26, 0x4a, + 0x06, 0x76, 0x02, 0x6b, 0xe8, 0xce, 0x76, 0x67, 0x50, 0x89, 0xe9, 0xc5, 0xc4, 0xbb, 0x89, 0xdf, + 0xc1, 0xab, 0x1f, 0xc2, 0x0b, 0x89, 0x07, 0x9b, 0x78, 0xf1, 0x64, 0x0c, 0xf8, 0x41, 0xcc, 0xce, + 0xce, 0x2e, 0x42, 0x61, 0x0b, 0x24, 0xbd, 0x2d, 0x33, 0xef, 0xf3, 0xcc, 0xef, 0x7d, 0x77, 0x9e, + 0x05, 0x36, 0x6a, 0xd4, 0x11, 0xa4, 0xe5, 0xd9, 0xa6, 0x47, 0x04, 0x6d, 0xda, 0xfb, 0xb6, 0x30, + 0xdf, 0xe4, 0xaa, 0x54, 0x90, 0x9c, 0x79, 0xd0, 0xa2, 0x5e, 0x3b, 0xeb, 0x7a, 0x4c, 0x30, 0xac, + 0x85, 0x75, 0xd9, 0xa8, 0x2e, 0xab, 0xea, 0xb4, 0x3b, 0x31, 0x1e, 0xfd, 0x6a, 0xe9, 0xa3, 0x5d, + 0xab, 0x33, 0x56, 0x6f, 0x52, 0x93, 0xb8, 0xb6, 0x49, 0x1c, 0x87, 0x09, 0x22, 0x6c, 0xe6, 0x70, + 0xb5, 0xbb, 0x5c, 0x67, 0x75, 0x26, 0x1f, 0x4d, 0xff, 0x29, 0x58, 0x35, 0xae, 0xc2, 0xea, 
0x33, + 0x1f, 0x65, 0xb7, 0xd9, 0x2c, 0x13, 0x41, 0x4b, 0xbe, 0x1d, 0x2f, 0xd3, 0x83, 0x16, 0xe5, 0xc2, + 0x78, 0x0d, 0xda, 0xa8, 0x4d, 0xee, 0x32, 0x87, 0x53, 0x5c, 0x82, 0x45, 0x9f, 0xa0, 0x22, 0x11, + 0x78, 0x0a, 0xa5, 0xcf, 0x6c, 0x2e, 0xe6, 0xd7, 0xb3, 0xe3, 0x9b, 0xc9, 0x46, 0x26, 0xc5, 0xb3, + 0x9d, 0xdf, 0x6b, 0x89, 0x32, 0x78, 0x91, 0xab, 0x51, 0x82, 0x15, 0x79, 0x56, 0x54, 0xa3, 0x20, + 0xf0, 0x32, 0xcc, 0x5b, 0xd4, 0x61, 0xfb, 0x29, 0x94, 0x46, 0x9b, 0xc9, 0x72, 0xf0, 0x03, 0x5f, + 0x07, 0xa8, 0x35, 0x88, 0xe3, 0xd0, 0x66, 0xc5, 0xb6, 0x52, 0x73, 0x72, 0x2b, 0xa9, 0x56, 0xf6, + 0x2c, 0xe3, 0x15, 0x5c, 0x1e, 0x76, 0x53, 0xd4, 0x4f, 0x00, 0xfa, 0xd4, 0xd2, 0x73, 0x52, 0xe8, + 0x72, 0x32, 0xc2, 0x35, 0x76, 0x60, 0x6d, 0xd0, 0x9f, 0x17, 0xdb, 0x8f, 0x1b, 0xc4, 0x76, 0xf6, + 0xac, 0x90, 0x7b, 0x15, 0xce, 0xd7, 0xfc, 0x15, 0x9f, 0x2f, 0x40, 0x5f, 0xa8, 0x05, 0x15, 0x86, + 0x0b, 0xe9, 0xf1, 0xea, 0x53, 0x99, 0x6e, 0x11, 0x6e, 0x8c, 0x3a, 0x31, 0x98, 0x56, 0x48, 0x3c, + 0x38, 0x53, 0x34, 0x3c, 0x53, 0x0f, 0x8c, 0x38, 0x8f, 0x53, 0xe1, 0x5e, 0x87, 0x9b, 0xe1, 0x0d, + 0x7c, 0xde, 0xb0, 0x7d, 0x25, 0x17, 0xd4, 0xda, 0xb5, 0x2c, 0x8f, 0x72, 0x4e, 0xa3, 0x8b, 0xfa, + 0x11, 0xc1, 0xad, 0xf8, 0x3a, 0x45, 0xf7, 0x12, 0x96, 0x48, 0xb0, 0x58, 0x71, 0x89, 0xed, 0x85, + 0x7c, 0xf9, 0x38, 0xbe, 0xe3, 0x86, 0x4f, 0x89, 0xed, 0x29, 0xd8, 0x0b, 0xa4, 0xbf, 0xc4, 0xf3, + 0x9d, 0x05, 0x98, 0x97, 0x1c, 0xf8, 0x0b, 0x82, 0xa5, 0x81, 0xd8, 0xe0, 0xad, 0xb8, 0x33, 0xc6, + 0x66, 0x50, 0xbb, 0x37, 0xad, 0x2c, 0xe8, 0xd4, 0xd8, 0xf8, 0xf0, 0xf3, 0xef, 0xe7, 0xb9, 0x34, + 0xd6, 0xcd, 0x11, 0x5f, 0x90, 0xe8, 0x89, 0xe3, 0xaf, 0x08, 0x92, 0x91, 0x1c, 0xe7, 0x4e, 0x3c, + 0x6d, 0x38, 0x9f, 0x5a, 0x7e, 0x1a, 0x89, 0x82, 0xdb, 0x96, 0x70, 0x5b, 0xb8, 0x10, 0x0b, 0x67, + 0xbe, 0xef, 0x5f, 0xc7, 0x43, 0xb3, 0xda, 0xae, 0x04, 0xd1, 0xff, 0x86, 0xe0, 0xd2, 0x88, 0xe4, + 0xe0, 0xed, 0xc9, 0x41, 0x8e, 0xa5, 0x55, 0xdb, 0x99, 0x4d, 0xac, 0xfa, 0xc9, 0xc9, 0x7e, 0x32, + 0xf8, 0x76, 0xfc, 0xb0, 0x65, 0x43, 0xf2, 0x8b, 0x70, 0x88, 0xbf, 0x23, 0x58, 0x19, 0x99, 0x24, + 0x7c, 0x7f, 0x5a, 0x94, 0x81, 0x14, 0x6b, 0x0f, 0x66, 0x95, 0xab, 0x5e, 0x0a, 0xb2, 0x97, 0xbb, + 0x38, 0x33, 0x41, 0x2f, 0xe1, 0xcb, 0xc1, 0x3f, 0x10, 0x5c, 0x19, 0x93, 0x3d, 0xfc, 0x70, 0x92, + 0x1b, 0x1c, 0x93, 0x6e, 0xed, 0xd1, 0xec, 0x06, 0x93, 0xbc, 0x9f, 0xb7, 0x7d, 0x65, 0x85, 0x84, + 0xd2, 0x62, 0xa6, 0xd3, 0xd5, 0xd1, 0x51, 0x57, 0x47, 0x7f, 0xba, 0x3a, 0xfa, 0xd4, 0xd3, 0x13, + 0x47, 0x3d, 0x3d, 0xf1, 0xab, 0xa7, 0x27, 0x5e, 0x5c, 0x7c, 0xf7, 0x9f, 0x58, 0xb4, 0x5d, 0xca, + 0xab, 0xe7, 0xe4, 0x9f, 0x69, 0xe1, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x90, 0x72, 0x96, 0x2b, + 0xf2, 0x07, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type QueryClient interface { + AllRateLimits(ctx context.Context, in *QueryAllRateLimitsRequest, opts ...grpc.CallOption) (*QueryAllRateLimitsResponse, error) + RateLimit(ctx context.Context, in *QueryRateLimitRequest, opts ...grpc.CallOption) (*QueryRateLimitResponse, error) + RateLimitsByChainId(ctx context.Context, in *QueryRateLimitsByChainIdRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChainIdResponse, error) + RateLimitsByChannelId(ctx context.Context, in *QueryRateLimitsByChannelIdRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChannelIdResponse, error) + AllWhitelistedAddresses(ctx context.Context, in *QueryAllWhitelistedAddressesRequest, opts ...grpc.CallOption) (*QueryAllWhitelistedAddressesResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) AllRateLimits(ctx context.Context, in *QueryAllRateLimitsRequest, opts ...grpc.CallOption) (*QueryAllRateLimitsResponse, error) { + out := new(QueryAllRateLimitsResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Query/AllRateLimits", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimit(ctx context.Context, in *QueryRateLimitRequest, opts ...grpc.CallOption) (*QueryRateLimitResponse, error) { + out := new(QueryRateLimitResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Query/RateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimitsByChainId(ctx context.Context, in *QueryRateLimitsByChainIdRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChainIdResponse, error) { + out := new(QueryRateLimitsByChainIdResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Query/RateLimitsByChainId", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimitsByChannelId(ctx context.Context, in *QueryRateLimitsByChannelIdRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChannelIdResponse, error) { + out := new(QueryRateLimitsByChannelIdResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Query/RateLimitsByChannelId", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) AllWhitelistedAddresses(ctx context.Context, in *QueryAllWhitelistedAddressesRequest, opts ...grpc.CallOption) (*QueryAllWhitelistedAddressesResponse, error) { + out := new(QueryAllWhitelistedAddressesResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Query/AllWhitelistedAddresses", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + AllRateLimits(context.Context, *QueryAllRateLimitsRequest) (*QueryAllRateLimitsResponse, error) + RateLimit(context.Context, *QueryRateLimitRequest) (*QueryRateLimitResponse, error) + RateLimitsByChainId(context.Context, *QueryRateLimitsByChainIdRequest) (*QueryRateLimitsByChainIdResponse, error) + RateLimitsByChannelId(context.Context, *QueryRateLimitsByChannelIdRequest) (*QueryRateLimitsByChannelIdResponse, error) + AllWhitelistedAddresses(context.Context, *QueryAllWhitelistedAddressesRequest) (*QueryAllWhitelistedAddressesResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) AllRateLimits(ctx context.Context, req *QueryAllRateLimitsRequest) (*QueryAllRateLimitsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllRateLimits not implemented") +} +func (*UnimplementedQueryServer) RateLimit(ctx context.Context, req *QueryRateLimitRequest) (*QueryRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimit not implemented") +} +func (*UnimplementedQueryServer) RateLimitsByChainId(ctx context.Context, req *QueryRateLimitsByChainIdRequest) (*QueryRateLimitsByChainIdResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimitsByChainId not implemented") +} +func (*UnimplementedQueryServer) RateLimitsByChannelId(ctx context.Context, req *QueryRateLimitsByChannelIdRequest) (*QueryRateLimitsByChannelIdResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimitsByChannelId not implemented") +} +func (*UnimplementedQueryServer) AllWhitelistedAddresses(ctx context.Context, req *QueryAllWhitelistedAddressesRequest) (*QueryAllWhitelistedAddressesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllWhitelistedAddresses not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_AllRateLimits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllRateLimitsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllRateLimits(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Query/AllRateLimits", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllRateLimits(ctx, req.(*QueryAllRateLimitsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Query/RateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimit(ctx, req.(*QueryRateLimitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimitsByChainId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitsByChainIdRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimitsByChainId(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Query/RateLimitsByChainId", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimitsByChainId(ctx, req.(*QueryRateLimitsByChainIdRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimitsByChannelId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitsByChannelIdRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimitsByChannelId(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Query/RateLimitsByChannelId", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimitsByChannelId(ctx, req.(*QueryRateLimitsByChannelIdRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_AllWhitelistedAddresses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllWhitelistedAddressesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllWhitelistedAddresses(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Query/AllWhitelistedAddresses", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllWhitelistedAddresses(ctx, req.(*QueryAllWhitelistedAddressesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "centauri.ratelimit.v1beta1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AllRateLimits", + Handler: _Query_AllRateLimits_Handler, + }, + { + MethodName: "RateLimit", + Handler: _Query_RateLimit_Handler, + }, + { + MethodName: "RateLimitsByChainId", + Handler: _Query_RateLimitsByChainId_Handler, + }, + { + MethodName: "RateLimitsByChannelId", + Handler: _Query_RateLimitsByChannelId_Handler, + }, + { + MethodName: "AllWhitelistedAddresses", + Handler: _Query_AllWhitelistedAddresses_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "centauri/ratelimit/v1beta1/query.proto", +} + +func (m *QueryAllRateLimitsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllRateLimitsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllRateLimitsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryAllRateLimitsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllRateLimitsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllRateLimitsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RateLimit != nil { + { + size, err := m.RateLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChainIdRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChainIdRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChainIdRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChainIdResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChainIdResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChainIdResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChannelIdRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChannelIdRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChannelIdRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChannelIdResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChannelIdResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChannelIdResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryAllWhitelistedAddressesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllWhitelistedAddressesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllWhitelistedAddressesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryAllWhitelistedAddressesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllWhitelistedAddressesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllWhitelistedAddressesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AddressPairs) > 0 { + for iNdEx := len(m.AddressPairs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AddressPairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryAllRateLimitsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryAllRateLimitsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryRateLimitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RateLimit != nil { + l = 
m.RateLimit.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitsByChainIdRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitsByChainIdResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryRateLimitsByChannelIdRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitsByChannelIdResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryAllWhitelistedAddressesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryAllWhitelistedAddressesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AddressPairs) > 0 { + for _, e := range m.AddressPairs { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryAllRateLimitsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllRateLimitsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllRateLimitsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllRateLimitsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllRateLimitsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllRateLimitsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RateLimit == nil { + m.RateLimit = &RateLimit{} + } + if err := m.RateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChainIdRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChainIdRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChainIdRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChainIdResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChainIdResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChainIdResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChannelIdRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChannelIdRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChannelIdRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChannelIdResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChannelIdResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChannelIdResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllWhitelistedAddressesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllWhitelistedAddressesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllWhitelistedAddressesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllWhitelistedAddressesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllWhitelistedAddressesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllWhitelistedAddressesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddressPairs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AddressPairs = append(m.AddressPairs, WhitelistedAddressPair{}) + if err := m.AddressPairs[len(m.AddressPairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/query.pb.gw.go b/x/ratelimit/types/query.pb.gw.go new file mode 100644 index 000000000..95020deff --- /dev/null +++ b/x/ratelimit/types/query.pb.gw.go @@ -0,0 +1,539 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_AllRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllRateLimitsRequest + var metadata runtime.ServerMetadata + + msg, err := client.AllRateLimits(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AllRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllRateLimitsRequest + var metadata runtime.ServerMetadata + + msg, err := server.AllRateLimits(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_RateLimit_0 = &utilities.DoubleArray{Encoding: map[string]int{"channel_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_RateLimit_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RateLimit_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.RateLimit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_RateLimit_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if 
err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RateLimit_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.RateLimit(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_RateLimitsByChainId_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChainIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + msg, err := client.RateLimitsByChainId(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_RateLimitsByChainId_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChainIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + msg, err := server.RateLimitsByChainId(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_RateLimitsByChannelId_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChannelIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + msg, err := client.RateLimitsByChannelId(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_RateLimitsByChannelId_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChannelIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + msg, err := server.RateLimitsByChannelId(ctx, 
&protoReq) + return msg, metadata, err + +} + +func request_Query_AllWhitelistedAddresses_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllWhitelistedAddressesRequest + var metadata runtime.ServerMetadata + + msg, err := client.AllWhitelistedAddresses(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AllWhitelistedAddresses_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllWhitelistedAddressesRequest + var metadata runtime.ServerMetadata + + msg, err := server.AllWhitelistedAddresses(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_AllRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllRateLimits_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllRateLimits_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimit_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChainId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimitsByChainId_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChainId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChannelId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimitsByChannelId_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChannelId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_AllWhitelistedAddresses_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllWhitelistedAddresses_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllWhitelistedAddresses_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_AllRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllRateLimits_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllRateLimits_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimit_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChainId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimitsByChainId_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChainId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChannelId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimitsByChannelId_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChannelId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_AllWhitelistedAddresses_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllWhitelistedAddresses_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllWhitelistedAddresses_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_AllRateLimits_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"centauri", "ratelimit", "ratelimits"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"centauri", "ratelimit", "channel_id", "by_denom"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimitsByChainId_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"centauri", "ratelimit", "ratelimits", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimitsByChannelId_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"centauri", "ratelimit", "ratelimits", "channel_id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_AllWhitelistedAddresses_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"centauri", "ratelimit", "whitelisted_addresses"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_AllRateLimits_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimit_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimitsByChainId_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimitsByChannelId_0 = runtime.ForwardResponseMessage + + forward_Query_AllWhitelistedAddresses_0 = runtime.ForwardResponseMessage +) diff --git a/x/ratelimit/types/quota.go b/x/ratelimit/types/quota.go new file mode 100644 index 000000000..8bfa24581 --- /dev/null +++ b/x/ratelimit/types/quota.go @@ -0,0 +1,22 @@ +package types + +import ( + "cosmossdk.io/math" +) + +// CheckExceedsQuota checks whether a new in/out flow would exceed the max in/out quota +func (q *Quota) CheckExceedsQuota(direction PacketDirection, amount math.Int, totalValue math.Int) bool { + // If there's no channel value (this should be almost impossible), it means there is no + // supply of the asset, so we shouldn't prevent inflows/outflows + if totalValue.IsZero() { + return false + } + var threshold math.Int + if direction == PACKET_RECV { + threshold = totalValue.Mul(q.MaxPercentRecv).Quo(math.NewInt(100)) + } else { + threshold = totalValue.Mul(q.MaxPercentSend).Quo(math.NewInt(100)) + } + + return amount.GT(threshold) +} diff --git a/x/ratelimit/types/ratelimit.pb.go b/x/ratelimit/types/ratelimit.pb.go new file mode 100644 index 000000000..af9923387 --- /dev/null +++ b/x/ratelimit/types/ratelimit.pb.go @@ -0,0 +1,1446 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/ratelimit.proto + +package types + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
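For reference, the quota check introduced in `x/ratelimit/types/quota.go` above reduces to a percentage-of-channel-value threshold: `threshold = totalValue * maxPercent / 100`, and the flow is rate limited when `amount > threshold`. A minimal standalone sketch of that arithmetic, using the same `cosmossdk.io/math` integer API and hypothetical numbers (not values from this PR):

```go
package main

import (
	"fmt"

	"cosmossdk.io/math"
)

func main() {
	// Hypothetical channel value (total supply of the asset on the channel)
	// and a quota allowing at most 10% of it to be sent per window.
	channelValue := math.NewInt(1_000_000)
	maxPercentSend := math.NewInt(10)

	// Same computation as Quota.CheckExceedsQuota: threshold = value * percent / 100.
	threshold := channelValue.Mul(maxPercentSend).Quo(math.NewInt(100)) // 100,000

	fmt.Println(math.NewInt(150_000).GT(threshold)) // true  -> would exceed the quota
	fmt.Println(math.NewInt(50_000).GT(threshold))  // false -> within the quota
}
```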
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type PacketDirection int32 + +const ( + PACKET_SEND PacketDirection = 0 + PACKET_RECV PacketDirection = 1 +) + +var PacketDirection_name = map[int32]string{ + 0: "PACKET_SEND", + 1: "PACKET_RECV", +} + +var PacketDirection_value = map[string]int32{ + "PACKET_SEND": 0, + "PACKET_RECV": 1, +} + +func (x PacketDirection) String() string { + return proto.EnumName(PacketDirection_name, int32(x)) +} + +func (PacketDirection) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_825b72046a6cedeb, []int{0} +} + +type Path struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} +func (*Path) Descriptor() ([]byte, []int) { + return fileDescriptor_825b72046a6cedeb, []int{0} +} +func (m *Path) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Path.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Path) XXX_Merge(src proto.Message) { + xxx_messageInfo_Path.Merge(m, src) +} +func (m *Path) XXX_Size() int { + return m.Size() +} +func (m *Path) XXX_DiscardUnknown() { + xxx_messageInfo_Path.DiscardUnknown(m) +} + +var xxx_messageInfo_Path proto.InternalMessageInfo + +func (m *Path) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *Path) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type Quota struct { + MaxPercentSend github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,1,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_send"` + MaxPercentRecv github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,2,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_recv"` + DurationHours uint64 `protobuf:"varint,3,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *Quota) Reset() { *m = Quota{} } +func (m *Quota) String() string { return proto.CompactTextString(m) } +func (*Quota) ProtoMessage() {} +func (*Quota) Descriptor() ([]byte, []int) { + return fileDescriptor_825b72046a6cedeb, []int{1} +} +func (m *Quota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Quota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Quota.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Quota) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quota.Merge(m, src) +} +func (m *Quota) XXX_Size() int { + return m.Size() +} +func (m *Quota) XXX_DiscardUnknown() { + xxx_messageInfo_Quota.DiscardUnknown(m) +} + +var xxx_messageInfo_Quota proto.InternalMessageInfo + +func (m *Quota) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +type Flow struct { + Inflow github_com_cosmos_cosmos_sdk_types.Int 
`protobuf:"bytes,1,opt,name=inflow,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"inflow"` + Outflow github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,2,opt,name=outflow,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"outflow"` + ChannelValue github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,3,opt,name=channel_value,json=channelValue,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"channel_value"` +} + +func (m *Flow) Reset() { *m = Flow{} } +func (m *Flow) String() string { return proto.CompactTextString(m) } +func (*Flow) ProtoMessage() {} +func (*Flow) Descriptor() ([]byte, []int) { + return fileDescriptor_825b72046a6cedeb, []int{2} +} +func (m *Flow) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Flow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Flow.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Flow) XXX_Merge(src proto.Message) { + xxx_messageInfo_Flow.Merge(m, src) +} +func (m *Flow) XXX_Size() int { + return m.Size() +} +func (m *Flow) XXX_DiscardUnknown() { + xxx_messageInfo_Flow.DiscardUnknown(m) +} + +var xxx_messageInfo_Flow proto.InternalMessageInfo + +type RateLimit struct { + Path *Path `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Quota *Quota `protobuf:"bytes,2,opt,name=quota,proto3" json:"quota,omitempty"` + Flow *Flow `protobuf:"bytes,3,opt,name=flow,proto3" json:"flow,omitempty"` +} + +func (m *RateLimit) Reset() { *m = RateLimit{} } +func (m *RateLimit) String() string { return proto.CompactTextString(m) } +func (*RateLimit) ProtoMessage() {} +func (*RateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_825b72046a6cedeb, []int{3} +} +func (m *RateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimit.Merge(m, src) +} +func (m *RateLimit) XXX_Size() int { + return m.Size() +} +func (m *RateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimit proto.InternalMessageInfo + +func (m *RateLimit) GetPath() *Path { + if m != nil { + return m.Path + } + return nil +} + +func (m *RateLimit) GetQuota() *Quota { + if m != nil { + return m.Quota + } + return nil +} + +func (m *RateLimit) GetFlow() *Flow { + if m != nil { + return m.Flow + } + return nil +} + +type WhitelistedAddressPair struct { + Sender string `protobuf:"bytes,1,opt,name=sender,proto3" json:"sender,omitempty"` + Receiver string `protobuf:"bytes,2,opt,name=receiver,proto3" json:"receiver,omitempty"` +} + +func (m *WhitelistedAddressPair) Reset() { *m = WhitelistedAddressPair{} } +func (m *WhitelistedAddressPair) String() string { return proto.CompactTextString(m) } +func (*WhitelistedAddressPair) ProtoMessage() {} +func (*WhitelistedAddressPair) Descriptor() ([]byte, []int) { + return fileDescriptor_825b72046a6cedeb, []int{4} +} +func (m *WhitelistedAddressPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WhitelistedAddressPair) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WhitelistedAddressPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WhitelistedAddressPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_WhitelistedAddressPair.Merge(m, src) +} +func (m *WhitelistedAddressPair) XXX_Size() int { + return m.Size() +} +func (m *WhitelistedAddressPair) XXX_DiscardUnknown() { + xxx_messageInfo_WhitelistedAddressPair.DiscardUnknown(m) +} + +var xxx_messageInfo_WhitelistedAddressPair proto.InternalMessageInfo + +func (m *WhitelistedAddressPair) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + +func (m *WhitelistedAddressPair) GetReceiver() string { + if m != nil { + return m.Receiver + } + return "" +} + +func init() { + proto.RegisterEnum("centauri.ratelimit.v1beta1.PacketDirection", PacketDirection_name, PacketDirection_value) + proto.RegisterType((*Path)(nil), "centauri.ratelimit.v1beta1.Path") + proto.RegisterType((*Quota)(nil), "centauri.ratelimit.v1beta1.Quota") + proto.RegisterType((*Flow)(nil), "centauri.ratelimit.v1beta1.Flow") + proto.RegisterType((*RateLimit)(nil), "centauri.ratelimit.v1beta1.RateLimit") + proto.RegisterType((*WhitelistedAddressPair)(nil), "centauri.ratelimit.v1beta1.WhitelistedAddressPair") +} + +func init() { + proto.RegisterFile("centauri/ratelimit/v1beta1/ratelimit.proto", fileDescriptor_825b72046a6cedeb) +} + +var fileDescriptor_825b72046a6cedeb = []byte{ + // 516 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xdd, 0x6a, 0x13, 0x41, + 0x14, 0xde, 0xb1, 0x69, 0x34, 0x27, 0xb6, 0x8d, 0x43, 0x29, 0x21, 0xe0, 0x36, 0x06, 0x94, 0x52, + 0x71, 0x43, 0xab, 0x20, 0xe2, 0x55, 0x7f, 0x52, 0x5a, 0x2c, 0x12, 0x37, 0x52, 0xc5, 0x9b, 0x30, + 0xd9, 0x39, 0x66, 0x87, 0x66, 0x77, 0xe2, 0xec, 0x6c, 0x1a, 0xdf, 0xc0, 0x4b, 0xdf, 0xc1, 0x17, + 0xf0, 0x31, 0x7a, 0xd9, 0x4b, 0xf1, 0xa2, 0x48, 0x72, 0xed, 0x3b, 0xc8, 0xcc, 0x6e, 0x6c, 0x11, + 0x54, 0xac, 0x57, 0xbb, 0xe7, 0x9b, 0xef, 0x7c, 0x33, 0xdf, 0xcc, 0x77, 0x60, 0x3d, 0xc0, 0x58, + 0xb3, 0x54, 0x89, 0xa6, 0x62, 0x1a, 0x07, 0x22, 0x12, 0xba, 0x39, 0xda, 0xe8, 0xa1, 0x66, 0x1b, + 0x17, 0x88, 0x37, 0x54, 0x52, 0x4b, 0x5a, 0x9b, 0x71, 0xbd, 0x8b, 0x95, 0x9c, 0x5b, 0x5b, 0xee, + 0xcb, 0xbe, 0xb4, 0xb4, 0xa6, 0xf9, 0xcb, 0x3a, 0x1a, 0x4f, 0xa1, 0xd0, 0x66, 0x3a, 0xa4, 0xcb, + 0x30, 0xcf, 0x31, 0x96, 0x51, 0x95, 0xd4, 0xc9, 0x5a, 0xc9, 0xcf, 0x0a, 0x7a, 0x1b, 0x20, 0x08, + 0x59, 0x1c, 0xe3, 0xa0, 0x2b, 0x78, 0xf5, 0x9a, 0x5d, 0x2a, 0xe5, 0xc8, 0x01, 0x6f, 0x4c, 0x08, + 0xcc, 0xbf, 0x48, 0xa5, 0x66, 0xf4, 0x35, 0x54, 0x22, 0x36, 0xee, 0x0e, 0x51, 0x99, 0x13, 0x74, + 0x13, 0x8c, 0x79, 0xa6, 0xb4, 0xed, 0x9d, 0x9e, 0xaf, 0x3a, 0x5f, 0xcf, 0x57, 0xef, 0xf5, 0x85, + 0x0e, 0xd3, 0x9e, 0x17, 0xc8, 0xa8, 0x19, 0xc8, 0x24, 0x92, 0x49, 0xfe, 0x79, 0x90, 0xf0, 0xe3, + 0xa6, 0x7e, 0x3f, 0xc4, 0xc4, 0x3b, 0x88, 0xb5, 0xbf, 0x18, 0xb1, 0x71, 0x3b, 0x93, 0xe9, 0x60, + 0xcc, 0x7f, 0x55, 0x56, 0x18, 0x8c, 0xb2, 0x83, 0xfc, 0x8f, 0xb2, 0x8f, 0xc1, 0x88, 0xde, 0x85, + 0x45, 0x9e, 0x2a, 0xa6, 0x85, 0x8c, 0xbb, 0xa1, 0x4c, 0x55, 0x52, 0x9d, 0xab, 0x93, 0xb5, 0x82, + 0xbf, 0x30, 0x43, 0xf7, 0x0d, 0xd8, 0xf8, 0x4e, 0xa0, 0xb0, 0x37, 0x90, 0x27, 0x74, 0x0f, 0x8a, + 0x22, 0x7e, 0x3b, 0x90, 0x27, 0x57, 0x74, 0x96, 0x77, 0xd3, 0x7d, 0xb8, 0x2e, 0x53, 0x6d, 0x85, + 0xae, 0x66, 0x64, 0xd6, 0x4e, 0x3b, 0xb0, 0x30, 0x7b, 0x9e, 
0x11, 0x1b, 0xa4, 0x68, 0x0d, 0xfc, + 0xbb, 0xde, 0xcd, 0x5c, 0xe4, 0xc8, 0x68, 0x34, 0x3e, 0x13, 0x28, 0xf9, 0x4c, 0xe3, 0xa1, 0x49, + 0x0f, 0x7d, 0x04, 0x85, 0x21, 0xd3, 0xa1, 0xb5, 0x5c, 0xde, 0xac, 0x7b, 0xbf, 0x0f, 0x98, 0x67, + 0x72, 0xe4, 0x5b, 0x36, 0x7d, 0x0c, 0xf3, 0xef, 0x4c, 0x2e, 0xac, 0xc1, 0xf2, 0xe6, 0x9d, 0x3f, + 0xb5, 0xd9, 0x00, 0xf9, 0x19, 0xdf, 0x6c, 0x67, 0x2f, 0x66, 0xee, 0xef, 0xdb, 0x99, 0x37, 0xf1, + 0x2d, 0xbb, 0x71, 0x08, 0x2b, 0xaf, 0x42, 0x61, 0x08, 0x89, 0x46, 0xbe, 0xc5, 0xb9, 0xc2, 0x24, + 0x69, 0x33, 0xa1, 0xe8, 0x0a, 0x14, 0x4d, 0x16, 0x51, 0xe5, 0xb9, 0xce, 0x2b, 0x5a, 0x83, 0x1b, + 0x0a, 0x03, 0x14, 0x23, 0x54, 0x79, 0xac, 0x7f, 0xd6, 0xeb, 0x4f, 0x60, 0xa9, 0xcd, 0x82, 0x63, + 0xd4, 0xbb, 0x42, 0x61, 0x60, 0x82, 0x40, 0x97, 0xa0, 0xdc, 0xde, 0xda, 0x79, 0xd6, 0x7a, 0xd9, + 0xed, 0xb4, 0x9e, 0xef, 0x56, 0x9c, 0x4b, 0x80, 0xdf, 0xda, 0x39, 0xaa, 0x90, 0x5a, 0xe1, 0xc3, + 0x27, 0xd7, 0xd9, 0xbe, 0x7f, 0x3a, 0x71, 0xc9, 0xd9, 0xc4, 0x25, 0xdf, 0x26, 0x2e, 0xf9, 0x38, + 0x75, 0x9d, 0xb3, 0xa9, 0xeb, 0x7c, 0x99, 0xba, 0xce, 0x9b, 0x5b, 0xe3, 0x4b, 0xe3, 0x6b, 0xaf, + 0xbe, 0x57, 0xb4, 0x13, 0xf8, 0xf0, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0xac, 0xf5, 0x58, 0x11, + 0xe1, 0x03, 0x00, 0x00, +} + +func (m *Path) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Path) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Path) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintRatelimit(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintRatelimit(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Quota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Quota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Quota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintRatelimit(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x18 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Flow) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Flow) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Flow) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := 
m.ChannelValue.Size() + i -= size + if _, err := m.ChannelValue.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size := m.Outflow.Size() + i -= size + if _, err := m.Outflow.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.Inflow.Size() + i -= size + if _, err := m.Inflow.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Flow != nil { + { + size, err := m.Flow.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Quota != nil { + { + size, err := m.Quota.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Path != nil { + { + size, err := m.Path.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WhitelistedAddressPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WhitelistedAddressPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WhitelistedAddressPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Receiver) > 0 { + i -= len(m.Receiver) + copy(dAtA[i:], m.Receiver) + i = encodeVarintRatelimit(dAtA, i, uint64(len(m.Receiver))) + i-- + dAtA[i] = 0x12 + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintRatelimit(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintRatelimit(dAtA []byte, offset int, v uint64) int { + offset -= sovRatelimit(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Path) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovRatelimit(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovRatelimit(uint64(l)) + } + return n +} + +func (m *Quota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.MaxPercentSend.Size() + n += 1 + l + sovRatelimit(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovRatelimit(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovRatelimit(uint64(m.DurationHours)) + } + return n +} + +func (m *Flow) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Inflow.Size() + n += 1 + l + 
sovRatelimit(uint64(l)) + l = m.Outflow.Size() + n += 1 + l + sovRatelimit(uint64(l)) + l = m.ChannelValue.Size() + n += 1 + l + sovRatelimit(uint64(l)) + return n +} + +func (m *RateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Path != nil { + l = m.Path.Size() + n += 1 + l + sovRatelimit(uint64(l)) + } + if m.Quota != nil { + l = m.Quota.Size() + n += 1 + l + sovRatelimit(uint64(l)) + } + if m.Flow != nil { + l = m.Flow.Size() + n += 1 + l + sovRatelimit(uint64(l)) + } + return n +} + +func (m *WhitelistedAddressPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovRatelimit(uint64(l)) + } + l = len(m.Receiver) + if l > 0 { + n += 1 + l + sovRatelimit(uint64(l)) + } + return n +} + +func sovRatelimit(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRatelimit(x uint64) (n int) { + return sovRatelimit(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Path) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Path: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Path: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Quota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Flow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Flow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Flow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Inflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Outflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Outflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ChannelValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Path == nil { + m.Path = &Path{} + } + if err := m.Path.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quota", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quota == nil { + m.Quota = &Quota{} + } + if err := m.Quota.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flow", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flow == nil { + m.Flow = &Flow{} + } + if err := m.Flow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WhitelistedAddressPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WhitelistedAddressPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WhitelistedAddressPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Receiver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRatelimit(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRatelimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRatelimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRatelimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRatelimit + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupRatelimit + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthRatelimit + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthRatelimit = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRatelimit = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRatelimit = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/tx.pb.go b/x/ratelimit/types/tx.pb.go new file mode 100644 index 000000000..cc47a6df0 --- /dev/null +++ b/x/ratelimit/types/tx.pb.go @@ -0,0 +1,2196 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MsgAddRateLimit struct { + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty" yaml:"authority"` + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,3,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + MaxPercentSend github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,4,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_send"` + MaxPercentRecv github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,5,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_recv"` + DurationHours uint64 `protobuf:"varint,6,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *MsgAddRateLimit) Reset() { *m = MsgAddRateLimit{} } +func (m *MsgAddRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgAddRateLimit) ProtoMessage() {} +func (*MsgAddRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{0} +} +func (m *MsgAddRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgAddRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgAddRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgAddRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgAddRateLimit.Merge(m, src) +} +func (m *MsgAddRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgAddRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgAddRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgAddRateLimit proto.InternalMessageInfo + +func (m *MsgAddRateLimit) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgAddRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgAddRateLimit) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *MsgAddRateLimit) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +type MsgAddRateLimitResponse struct { +} + +func (m *MsgAddRateLimitResponse) Reset() { *m = MsgAddRateLimitResponse{} } +func (m *MsgAddRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgAddRateLimitResponse) ProtoMessage() {} +func (*MsgAddRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{1} +} +func (m *MsgAddRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgAddRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgAddRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgAddRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgAddRateLimitResponse.Merge(m, src) +} +func (m *MsgAddRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgAddRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgAddRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgAddRateLimitResponse proto.InternalMessageInfo + +type MsgUpdateRateLimit struct { + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty" yaml:"authority"` + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,3,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + MaxPercentSend github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,4,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_send"` + MaxPercentRecv github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,5,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_recv"` + DurationHours uint64 `protobuf:"varint,6,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *MsgUpdateRateLimit) Reset() { *m = MsgUpdateRateLimit{} } +func (m *MsgUpdateRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateRateLimit) ProtoMessage() {} +func (*MsgUpdateRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{2} +} +func (m *MsgUpdateRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateRateLimit.Merge(m, src) +} +func (m *MsgUpdateRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateRateLimit proto.InternalMessageInfo + +func (m *MsgUpdateRateLimit) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdateRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgUpdateRateLimit) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *MsgUpdateRateLimit) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +type MsgUpdateRateLimitResponse struct { +} + +func (m *MsgUpdateRateLimitResponse) Reset() { *m = MsgUpdateRateLimitResponse{} } +func (m *MsgUpdateRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateRateLimitResponse) ProtoMessage() {} +func (*MsgUpdateRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{3} +} +func (m *MsgUpdateRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateRateLimitResponse) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateRateLimitResponse.Merge(m, src) +} +func (m *MsgUpdateRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateRateLimitResponse proto.InternalMessageInfo + +type MsgRemoveRateLimit struct { + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty" yaml:"authority"` + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,3,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *MsgRemoveRateLimit) Reset() { *m = MsgRemoveRateLimit{} } +func (m *MsgRemoveRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgRemoveRateLimit) ProtoMessage() {} +func (*MsgRemoveRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{4} +} +func (m *MsgRemoveRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRemoveRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRemoveRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRemoveRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRemoveRateLimit.Merge(m, src) +} +func (m *MsgRemoveRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgRemoveRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRemoveRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRemoveRateLimit proto.InternalMessageInfo + +func (m *MsgRemoveRateLimit) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgRemoveRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgRemoveRateLimit) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type MsgRemoveRateLimitResponse struct { +} + +func (m *MsgRemoveRateLimitResponse) Reset() { *m = MsgRemoveRateLimitResponse{} } +func (m *MsgRemoveRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgRemoveRateLimitResponse) ProtoMessage() {} +func (*MsgRemoveRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{5} +} +func (m *MsgRemoveRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRemoveRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRemoveRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRemoveRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRemoveRateLimitResponse.Merge(m, src) +} +func (m *MsgRemoveRateLimitResponse) XXX_Size() int { + 
return m.Size() +} +func (m *MsgRemoveRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRemoveRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRemoveRateLimitResponse proto.InternalMessageInfo + +type MsgResetRateLimit struct { + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty" yaml:"authority"` + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,3,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *MsgResetRateLimit) Reset() { *m = MsgResetRateLimit{} } +func (m *MsgResetRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgResetRateLimit) ProtoMessage() {} +func (*MsgResetRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{6} +} +func (m *MsgResetRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgResetRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgResetRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgResetRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgResetRateLimit.Merge(m, src) +} +func (m *MsgResetRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgResetRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgResetRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgResetRateLimit proto.InternalMessageInfo + +func (m *MsgResetRateLimit) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgResetRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgResetRateLimit) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type MsgResetRateLimitResponse struct { +} + +func (m *MsgResetRateLimitResponse) Reset() { *m = MsgResetRateLimitResponse{} } +func (m *MsgResetRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgResetRateLimitResponse) ProtoMessage() {} +func (*MsgResetRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{7} +} +func (m *MsgResetRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgResetRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgResetRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgResetRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgResetRateLimitResponse.Merge(m, src) +} +func (m *MsgResetRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgResetRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgResetRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgResetRateLimitResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgAddRateLimit)(nil), "centauri.ratelimit.v1beta1.MsgAddRateLimit") + proto.RegisterType((*MsgAddRateLimitResponse)(nil), "centauri.ratelimit.v1beta1.MsgAddRateLimitResponse") + proto.RegisterType((*MsgUpdateRateLimit)(nil), 
"centauri.ratelimit.v1beta1.MsgUpdateRateLimit") + proto.RegisterType((*MsgUpdateRateLimitResponse)(nil), "centauri.ratelimit.v1beta1.MsgUpdateRateLimitResponse") + proto.RegisterType((*MsgRemoveRateLimit)(nil), "centauri.ratelimit.v1beta1.MsgRemoveRateLimit") + proto.RegisterType((*MsgRemoveRateLimitResponse)(nil), "centauri.ratelimit.v1beta1.MsgRemoveRateLimitResponse") + proto.RegisterType((*MsgResetRateLimit)(nil), "centauri.ratelimit.v1beta1.MsgResetRateLimit") + proto.RegisterType((*MsgResetRateLimitResponse)(nil), "centauri.ratelimit.v1beta1.MsgResetRateLimitResponse") +} + +func init() { + proto.RegisterFile("centauri/ratelimit/v1beta1/tx.proto", fileDescriptor_6b20911f56917b5f) +} + +var fileDescriptor_6b20911f56917b5f = []byte{ + // 534 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x95, 0xc1, 0x8a, 0xd3, 0x40, + 0x1c, 0xc6, 0x9b, 0xdd, 0xee, 0x42, 0x07, 0xac, 0xbb, 0x43, 0xb1, 0xd9, 0xa8, 0xe9, 0x12, 0x51, + 0x16, 0x97, 0x4d, 0xe8, 0x2e, 0x7a, 0xd8, 0xdb, 0xee, 0xc9, 0x05, 0x0b, 0x12, 0x15, 0xc4, 0x4b, + 0x99, 0x66, 0xc6, 0x34, 0xd8, 0xcc, 0x84, 0x99, 0x49, 0x48, 0x2f, 0x82, 0x5e, 0xbc, 0x28, 0xf8, + 0x28, 0x3e, 0xc6, 0x1e, 0xf7, 0x28, 0x1e, 0x8a, 0xb4, 0x88, 0x9e, 0x7d, 0x02, 0x49, 0xd2, 0xb4, + 0x6b, 0x42, 0x29, 0x45, 0x44, 0x0f, 0x9e, 0x26, 0x99, 0xff, 0x97, 0x6f, 0x7e, 0x33, 0xdf, 0x4c, + 0x06, 0xdc, 0x72, 0x08, 0x95, 0x28, 0xe4, 0x9e, 0xc5, 0x91, 0x24, 0x03, 0xcf, 0xf7, 0xa4, 0x15, + 0xb5, 0x7b, 0x44, 0xa2, 0xb6, 0x25, 0x63, 0x33, 0xe0, 0x4c, 0x32, 0xa8, 0xe5, 0x22, 0x73, 0x26, + 0x32, 0xa7, 0x22, 0xad, 0xe1, 0x32, 0x97, 0xa5, 0x32, 0x2b, 0x79, 0xca, 0xbe, 0xd0, 0x9a, 0x0e, + 0x13, 0x3e, 0x13, 0x96, 0x2f, 0x5c, 0x2b, 0x6a, 0x27, 0x4d, 0x56, 0x30, 0xbe, 0xae, 0x81, 0xab, + 0x1d, 0xe1, 0x9e, 0x60, 0x6c, 0x23, 0x49, 0x1e, 0x26, 0x56, 0xf0, 0x10, 0xd4, 0x50, 0x28, 0xfb, + 0x8c, 0x7b, 0x72, 0xa8, 0x2a, 0xbb, 0xca, 0x5e, 0xed, 0xb4, 0xf1, 0x63, 0xd4, 0xda, 0x1a, 0x22, + 0x7f, 0x70, 0x6c, 0xcc, 0x4a, 0x86, 0x3d, 0x97, 0xc1, 0x06, 0xd8, 0xc0, 0x84, 0x32, 0x5f, 0x5d, + 0x4b, 0xf4, 0x76, 0xf6, 0x02, 0x6f, 0x02, 0xe0, 0xf4, 0x11, 0xa5, 0x64, 0xd0, 0xf5, 0xb0, 0xba, + 0x9e, 0x96, 0x6a, 0xd3, 0x9e, 0x33, 0x0c, 0x9f, 0x81, 0x2d, 0x1f, 0xc5, 0xdd, 0x80, 0xf0, 0x64, + 0x42, 0x5d, 0x41, 0x28, 0x56, 0xab, 0xe9, 0x78, 0xe6, 0xf9, 0xa8, 0x55, 0xf9, 0x3c, 0x6a, 0xdd, + 0x71, 0x3d, 0xd9, 0x0f, 0x7b, 0xa6, 0xc3, 0x7c, 0x6b, 0x3a, 0x85, 0xac, 0x39, 0x10, 0xf8, 0xa5, + 0x25, 0x87, 0x01, 0x11, 0xe6, 0x19, 0x95, 0x76, 0xdd, 0x47, 0xf1, 0xa3, 0xcc, 0xe6, 0x31, 0xa1, + 0x25, 0x67, 0x4e, 0x9c, 0x48, 0xdd, 0xf8, 0x5d, 0x67, 0x9b, 0x38, 0x11, 0xbc, 0x0d, 0xea, 0x38, + 0xe4, 0x48, 0x7a, 0x8c, 0x76, 0xfb, 0x2c, 0xe4, 0x42, 0xdd, 0xdc, 0x55, 0xf6, 0xaa, 0xf6, 0x95, + 0xbc, 0xf7, 0x41, 0xd2, 0x79, 0x5c, 0x7f, 0xf3, 0xed, 0xe3, 0xdd, 0xf9, 0xfa, 0x18, 0x3b, 0xa0, + 0x59, 0x58, 0x66, 0x9b, 0x88, 0x80, 0x51, 0x41, 0x8c, 0xef, 0x6b, 0x00, 0x76, 0x84, 0xfb, 0x34, + 0xc0, 0x48, 0x92, 0xff, 0x29, 0xfc, 0xc9, 0x14, 0x6e, 0x00, 0xad, 0xbc, 0xd2, 0xb3, 0x20, 0xde, + 0x2b, 0x69, 0x10, 0x36, 0xf1, 0x59, 0xf4, 0x17, 0x82, 0x58, 0x40, 0x5b, 0xc0, 0x99, 0xd1, 0xbe, + 0x53, 0xc0, 0x76, 0x5a, 0x16, 0x44, 0xfe, 0x03, 0xb0, 0xd7, 0xc1, 0x4e, 0x89, 0x26, 0x67, 0x3d, + 0x7c, 0x5b, 0x05, 0xeb, 0x1d, 0xe1, 0xc2, 0x18, 0x34, 0x4e, 0x30, 0x7e, 0xc2, 0x11, 0x15, 0x2f, + 0x08, 0x9f, 0x53, 0xef, 0x9b, 0x8b, 0xff, 0x68, 0x66, 0xe1, 0xdc, 0x68, 0x47, 0x2b, 0x88, 0x73, + 0x02, 0xf8, 0x5a, 0x01, 0xcd, 0x2c, 0xf7, 0xf2, 0xe8, 0xe6, 0x12, 0xc3, 0xc2, 0x7e, 0xd1, 0xee, + 0xaf, 0xa6, 
0xff, 0x85, 0x21, 0x4b, 0x73, 0x75, 0x86, 0xc2, 0x2e, 0x58, 0xca, 0xb0, 0x60, 0xd7, + 0xc0, 0x57, 0xe0, 0x5a, 0x9a, 0x51, 0x99, 0xe0, 0x60, 0xa9, 0xe3, 0xe5, 0x68, 0xb5, 0x7b, 0x2b, + 0xc9, 0xf3, 0xf1, 0x4f, 0xf7, 0xcf, 0xc7, 0xba, 0x72, 0x31, 0xd6, 0x95, 0x2f, 0x63, 0x5d, 0xf9, + 0x30, 0xd1, 0x2b, 0x17, 0x13, 0xbd, 0xf2, 0x69, 0xa2, 0x57, 0x9e, 0x6f, 0xc7, 0x97, 0xae, 0xbc, + 0xf4, 0xe4, 0xf7, 0x36, 0xd3, 0x3b, 0xea, 0xe8, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x59, + 0x49, 0x80, 0x15, 0x07, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + AddTransferRateLimit(ctx context.Context, in *MsgAddRateLimit, opts ...grpc.CallOption) (*MsgAddRateLimitResponse, error) + UpdateTransferRateLimit(ctx context.Context, in *MsgUpdateRateLimit, opts ...grpc.CallOption) (*MsgUpdateRateLimitResponse, error) + RemoveTransferRateLimit(ctx context.Context, in *MsgRemoveRateLimit, opts ...grpc.CallOption) (*MsgRemoveRateLimitResponse, error) + ResetTransferRateLimit(ctx context.Context, in *MsgResetRateLimit, opts ...grpc.CallOption) (*MsgResetRateLimitResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) AddTransferRateLimit(ctx context.Context, in *MsgAddRateLimit, opts ...grpc.CallOption) (*MsgAddRateLimitResponse, error) { + out := new(MsgAddRateLimitResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Msg/AddTransferRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateTransferRateLimit(ctx context.Context, in *MsgUpdateRateLimit, opts ...grpc.CallOption) (*MsgUpdateRateLimitResponse, error) { + out := new(MsgUpdateRateLimitResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Msg/UpdateTransferRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) RemoveTransferRateLimit(ctx context.Context, in *MsgRemoveRateLimit, opts ...grpc.CallOption) (*MsgRemoveRateLimitResponse, error) { + out := new(MsgRemoveRateLimitResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Msg/RemoveTransferRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ResetTransferRateLimit(ctx context.Context, in *MsgResetRateLimit, opts ...grpc.CallOption) (*MsgResetRateLimitResponse, error) { + out := new(MsgResetRateLimitResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Msg/ResetTransferRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. 
+type MsgServer interface { + AddTransferRateLimit(context.Context, *MsgAddRateLimit) (*MsgAddRateLimitResponse, error) + UpdateTransferRateLimit(context.Context, *MsgUpdateRateLimit) (*MsgUpdateRateLimitResponse, error) + RemoveTransferRateLimit(context.Context, *MsgRemoveRateLimit) (*MsgRemoveRateLimitResponse, error) + ResetTransferRateLimit(context.Context, *MsgResetRateLimit) (*MsgResetRateLimitResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) AddTransferRateLimit(ctx context.Context, req *MsgAddRateLimit) (*MsgAddRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddTransferRateLimit not implemented") +} +func (*UnimplementedMsgServer) UpdateTransferRateLimit(ctx context.Context, req *MsgUpdateRateLimit) (*MsgUpdateRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateTransferRateLimit not implemented") +} +func (*UnimplementedMsgServer) RemoveTransferRateLimit(ctx context.Context, req *MsgRemoveRateLimit) (*MsgRemoveRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveTransferRateLimit not implemented") +} +func (*UnimplementedMsgServer) ResetTransferRateLimit(ctx context.Context, req *MsgResetRateLimit) (*MsgResetRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResetTransferRateLimit not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_AddTransferRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgAddRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).AddTransferRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Msg/AddTransferRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).AddTransferRateLimit(ctx, req.(*MsgAddRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateTransferRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateTransferRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Msg/UpdateTransferRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateTransferRateLimit(ctx, req.(*MsgUpdateRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_RemoveTransferRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgRemoveRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).RemoveTransferRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Msg/RemoveTransferRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(MsgServer).RemoveTransferRateLimit(ctx, req.(*MsgRemoveRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ResetTransferRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgResetRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ResetTransferRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Msg/ResetTransferRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ResetTransferRateLimit(ctx, req.(*MsgResetRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "centauri.ratelimit.v1beta1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AddTransferRateLimit", + Handler: _Msg_AddTransferRateLimit_Handler, + }, + { + MethodName: "UpdateTransferRateLimit", + Handler: _Msg_UpdateTransferRateLimit_Handler, + }, + { + MethodName: "RemoveTransferRateLimit", + Handler: _Msg_RemoveTransferRateLimit_Handler, + }, + { + MethodName: "ResetTransferRateLimit", + Handler: _Msg_ResetTransferRateLimit_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "centauri/ratelimit/v1beta1/tx.proto", +} + +func (m *MsgAddRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgAddRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgAddRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x30 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgAddRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgAddRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgAddRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateRateLimit) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x30 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgRemoveRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRemoveRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRemoveRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgRemoveRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRemoveRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRemoveRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return 
len(dAtA) - i, nil +} + +func (m *MsgResetRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgResetRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgResetRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgResetRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgResetRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgResetRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgAddRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.MaxPercentSend.Size() + n += 1 + l + sovTx(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovTx(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovTx(uint64(m.DurationHours)) + } + return n +} + +func (m *MsgAddRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.MaxPercentSend.Size() + n += 1 + l + sovTx(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovTx(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovTx(uint64(m.DurationHours)) + } + return n +} + +func (m *MsgUpdateRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgRemoveRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgRemoveRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgResetRateLimit) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgResetRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgAddRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgAddRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgAddRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgAddRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgAddRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgAddRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
MsgUpdateRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRemoveRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRemoveRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRemoveRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRemoveRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRemoveRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRemoveRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgResetRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgResetRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgResetRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgResetRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgResetRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgResetRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthTx + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupTx + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthTx + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthTx = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTx = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/transfermiddleware/client/cli/cli.go b/x/transfermiddleware/client/cli/cli.go index 624adc6a1..01e9a86f4 100644 --- a/x/transfermiddleware/client/cli/cli.go +++ b/x/transfermiddleware/client/cli/cli.go @@ -6,7 +6,7 @@ import ( "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/version" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" "github.com/spf13/cobra" ) diff --git a/x/transfermiddleware/client/cli/tx.go b/x/transfermiddleware/client/cli/tx.go index 979780724..6ef982dc8 100644 --- a/x/transfermiddleware/client/cli/tx.go +++ b/x/transfermiddleware/client/cli/tx.go @@ -7,7 +7,7 @@ import ( "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/tx" "github.com/cosmos/cosmos-sdk/version" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" "github.com/spf13/cobra" ) diff --git a/x/transfermiddleware/ibc_ante_test.go b/x/transfermiddleware/ibc_ante_test.go index 6b2c7eca4..8009f1ac0 100644 --- a/x/transfermiddleware/ibc_ante_test.go +++ b/x/transfermiddleware/ibc_ante_test.go @@ -10,7 +10,7 @@ import ( "github.com/cosmos/ibc-go/v7/modules/core/exported" wasmkeeper "github.com/cosmos/ibc-go/v7/modules/light-clients/08-wasm/keeper" wasmtypes "github.com/cosmos/ibc-go/v7/modules/light-clients/08-wasm/types" - customibctesting "github.com/notional-labs/centauri/v3/app/ibctesting" + customibctesting "github.com/notional-labs/centauri/v4/app/ibctesting" "github.com/stretchr/testify/suite" ) diff --git a/x/transfermiddleware/ibc_middleware.go b/x/transfermiddleware/ibc_middleware.go index 0b8809f84..1df5eb7d6 100644 --- a/x/transfermiddleware/ibc_middleware.go +++ b/x/transfermiddleware/ibc_middleware.go @@ -10,7 +10,7 @@ import ( channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types" ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/keeper" ) var _ porttypes.Middleware = &IBCMiddleware{} diff --git a/x/transfermiddleware/keeper/abci.go b/x/transfermiddleware/keeper/abci.go new file mode 100644 index 000000000..b5811a40f --- /dev/null +++ b/x/transfermiddleware/keeper/abci.go @@ -0,0 +1,20 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" +) + +// BeginBlocker of epochs module. 
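+// BeginBlocker runs once per block for the transfermiddleware module. It walks the
+// remove list written by AddParachainIBCInfoToRemoveList and, once the block time has
+// passed an entry's RemoveTime, deletes that parachain token info via
+// RemoveParachainIBCInfo.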
+func (k Keeper) BeginBlocker(ctx sdk.Context) { + // Iterate over remove list + k.IterateRemoveListInfo(ctx, func(removeList types.RemoveParachainIBCTokenInfo) (stop bool) { + // If pass the duration, remove parachain token info + if ctx.BlockTime().After(removeList.RemoveTime) { + k.RemoveParachainIBCInfo(ctx, removeList.NativeDenom) + } + + return false + }) + +} diff --git a/x/transfermiddleware/keeper/genesis.go b/x/transfermiddleware/keeper/genesis.go index 9d838a292..84639e543 100644 --- a/x/transfermiddleware/keeper/genesis.go +++ b/x/transfermiddleware/keeper/genesis.go @@ -2,7 +2,7 @@ package keeper import ( sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" ) // TODO: add init genesis logic @@ -11,6 +11,7 @@ func (k Keeper) InitGenesis(ctx sdk.Context, genState types.GenesisState) { for _, tokenInfo := range genState.TokenInfos { k.AddParachainIBCInfo(ctx, tokenInfo.IbcDenom, tokenInfo.ChannelId, tokenInfo.NativeDenom, tokenInfo.AssetId) } + k.SetParams(ctx, genState.Params) } // IterateParaTokenInfos iterate through all parachain token info. @@ -47,5 +48,6 @@ func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { return &types.GenesisState{ TokenInfos: infos, + Params: k.GetParams(ctx), } } diff --git a/x/transfermiddleware/keeper/genesis_test.go b/x/transfermiddleware/keeper/genesis_test.go index c532151e3..984cb3aca 100644 --- a/x/transfermiddleware/keeper/genesis_test.go +++ b/x/transfermiddleware/keeper/genesis_test.go @@ -3,8 +3,8 @@ package keeper_test import ( "testing" - helpers "github.com/notional-labs/centauri/v3/app/helpers" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + helpers "github.com/notional-labs/centauri/v4/app/helpers" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" "github.com/stretchr/testify/require" ) diff --git a/x/transfermiddleware/keeper/grpc_query.go b/x/transfermiddleware/keeper/grpc_query.go index 52609b9fd..696c09815 100644 --- a/x/transfermiddleware/keeper/grpc_query.go +++ b/x/transfermiddleware/keeper/grpc_query.go @@ -5,7 +5,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" ) func (k Keeper) ParaTokenInfo(c context.Context, req *types.QueryParaTokenInfoRequest) (*types.QueryParaTokenInfoResponse, error) { diff --git a/x/transfermiddleware/keeper/ics4wrapper.go b/x/transfermiddleware/keeper/ics4wrapper.go index 969221dae..65b9d8d92 100644 --- a/x/transfermiddleware/keeper/ics4wrapper.go +++ b/x/transfermiddleware/keeper/ics4wrapper.go @@ -11,7 +11,7 @@ import ( clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" ) func (keeper Keeper) hasParachainIBCTokenInfo(ctx sdk.Context, nativeDenom string) bool { diff --git a/x/transfermiddleware/keeper/keeper.go b/x/transfermiddleware/keeper/keeper.go index 2765ffb1b..88423fc5b 100644 --- a/x/transfermiddleware/keeper/keeper.go +++ b/x/transfermiddleware/keeper/keeper.go @@ -1,20 +1,25 @@ package keeper import ( + "time" + 
errorsmod "cosmossdk.io/errors" "github.com/cometbft/cometbft/libs/log" "github.com/cosmos/cosmos-sdk/codec" storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types" "github.com/cosmos/ibc-go/v7/modules/core/exported" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" ) type Keeper struct { cdc codec.BinaryCodec storeKey storetypes.StoreKey + paramSpace paramtypes.Subspace ICS4Wrapper porttypes.ICS4Wrapper bankKeeper types.BankKeeper transferKeeper types.TransferKeeper @@ -27,14 +32,21 @@ type Keeper struct { // NewKeeper returns a new instance of the x/ibchooks keeper func NewKeeper( storeKey storetypes.StoreKey, + paramSpace paramtypes.Subspace, codec codec.BinaryCodec, ics4Wrapper porttypes.ICS4Wrapper, transferKeeper types.TransferKeeper, bankKeeper types.BankKeeper, authority string, ) Keeper { + // set KeyTable if it has not already been set + if !paramSpace.HasKeyTable() { + paramSpace = paramSpace.WithKeyTable(types.ParamKeyTable()) + } + return Keeper{ storeKey: storeKey, + paramSpace: paramSpace, transferKeeper: transferKeeper, bankKeeper: bankKeeper, cdc: codec, @@ -75,6 +87,47 @@ func (keeper Keeper) AddParachainIBCInfo(ctx sdk.Context, ibcDenom, channelID, n return nil } +// TODO: testing +// AddParachainIBCInfoToRemoveList add parachain token information token to remove list. +func (keeper Keeper) AddParachainIBCInfoToRemoveList(ctx sdk.Context, nativeDenom string) (time.Time, error) { + params := keeper.GetParams(ctx) + store := ctx.KVStore(keeper.storeKey) + if !store.Has(types.GetKeyParachainIBCTokenInfoByNativeDenom(nativeDenom)) { + return time.Time{}, errorsmod.Wrapf(sdkerrors.ErrKeyNotFound, "Token %v info not found", nativeDenom) + } + + // Add to remove list + removeTime := ctx.BlockTime().Add(params.Duration) + removeToken := types.RemoveParachainIBCTokenInfo{ + NativeDenom: nativeDenom, + RemoveTime: removeTime, + } + + bz, err := keeper.cdc.Marshal(&removeToken) + if err != nil { + return time.Time{}, err + } + + store.Set(types.GetKeyParachainIBCTokenRemoveListByNativeDenom(nativeDenom), bz) + return removeTime, nil +} + +// TODO: testing +// IterateRemoveListInfo iterate all parachain token in remove list. +func (keeper Keeper) IterateRemoveListInfo(ctx sdk.Context, cb func(removeInfo types.RemoveParachainIBCTokenInfo) (stop bool)) { + store := ctx.KVStore(keeper.storeKey) + iterator := sdk.KVStorePrefixIterator(store, types.KeyParachainIBCTokenRemoveListByNativeDenom) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + var removeInfo types.RemoveParachainIBCTokenInfo + keeper.cdc.MustUnmarshal(iterator.Value(), &removeInfo) + if cb(removeInfo) { + break + } + } +} + // TODO: testing // RemoveParachainIBCTokenInfo remove parachain token information from chain state. 
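+// Note: with the remove-list flow above, the msg server no longer deletes token info
+// directly. MsgRemoveParachainIBCTokenInfo only schedules the removal via
+// AddParachainIBCInfoToRemoveList, and BeginBlocker performs the actual deletion once
+// params.Duration has elapsed.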
func (keeper Keeper) RemoveParachainIBCInfo(ctx sdk.Context, nativeDenom string) error { diff --git a/x/transfermiddleware/keeper/msg_server.go b/x/transfermiddleware/keeper/msg_server.go index 60dbb250e..e089d4a1d 100644 --- a/x/transfermiddleware/keeper/msg_server.go +++ b/x/transfermiddleware/keeper/msg_server.go @@ -8,7 +8,7 @@ import ( "cosmossdk.io/errors" govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" ) var _ types.MsgServer = msgServer{} @@ -55,7 +55,7 @@ func (ms msgServer) RemoveParachainIBCTokenInfo(goCtx context.Context, req *type return nil, errors.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", ms.authority, req.Authority) } - err := ms.RemoveParachainIBCInfo(ctx, req.NativeDenom) + removeTime, err := ms.AddParachainIBCInfoToRemoveList(ctx, req.NativeDenom) if err != nil { return nil, err } @@ -64,6 +64,7 @@ func (ms msgServer) RemoveParachainIBCTokenInfo(goCtx context.Context, req *type sdk.NewEvent( types.EventRemoveParachainIBCTokenInfo, sdk.NewAttribute(types.AttributeKeyNativeDenom, req.NativeDenom), + sdk.NewAttribute(types.AttributeKeyRemoveTime, removeTime.String()), ), }) diff --git a/x/transfermiddleware/keeper/params.go b/x/transfermiddleware/keeper/params.go new file mode 100644 index 000000000..49755cbfc --- /dev/null +++ b/x/transfermiddleware/keeper/params.go @@ -0,0 +1,15 @@ +package keeper + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" +) + +func (k Keeper) GetParams(ctx sdk.Context) (params types.Params) { + k.paramSpace.GetParamSet(ctx, ¶ms) + return params +} + +func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + k.paramSpace.SetParamSet(ctx, ¶ms) +} diff --git a/x/transfermiddleware/keeper/relay.go b/x/transfermiddleware/keeper/relay.go index f7861cd36..c7494af3e 100644 --- a/x/transfermiddleware/keeper/relay.go +++ b/x/transfermiddleware/keeper/relay.go @@ -5,7 +5,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" ) func (k Keeper) OnRecvPacket(ctx sdk.Context, packet channeltypes.Packet, data transfertypes.FungibleTokenPacketData) error { diff --git a/x/transfermiddleware/module.go b/x/transfermiddleware/module.go index e50cebf18..92aadb7df 100644 --- a/x/transfermiddleware/module.go +++ b/x/transfermiddleware/module.go @@ -14,9 +14,9 @@ import ( simtypes "github.com/cosmos/cosmos-sdk/types/simulation" "github.com/gorilla/mux" "github.com/grpc-ecosystem/grpc-gateway/runtime" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/client/cli" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/client/cli" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/keeper" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" "github.com/spf13/cobra" ) @@ -123,7 +123,9 @@ func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.Raw func (AppModule) ConsensusVersion() uint64 { return 1 } // BeginBlock implements the AppModule interface 
-func (am AppModule) BeginBlock(_ sdk.Context, _ abci.RequestBeginBlock) {} +func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { + am.keeper.BeginBlocker(ctx) +} // EndBlock implements the AppModule interface func (am AppModule) EndBlock(_ sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { diff --git a/x/transfermiddleware/pfm_test.go b/x/transfermiddleware/pfm_test.go index 05dde85e2..af7a3784f 100644 --- a/x/transfermiddleware/pfm_test.go +++ b/x/transfermiddleware/pfm_test.go @@ -11,7 +11,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" - customibctesting "github.com/notional-labs/centauri/v3/app/ibctesting" + customibctesting "github.com/notional-labs/centauri/v4/app/ibctesting" "github.com/stretchr/testify/suite" ) diff --git a/x/transfermiddleware/relay_test.go b/x/transfermiddleware/relay_test.go index 158d8672f..bf610025f 100644 --- a/x/transfermiddleware/relay_test.go +++ b/x/transfermiddleware/relay_test.go @@ -8,7 +8,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ibctransfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" - customibctesting "github.com/notional-labs/centauri/v3/app/ibctesting" + customibctesting "github.com/notional-labs/centauri/v4/app/ibctesting" "github.com/stretchr/testify/suite" ) diff --git a/x/transfermiddleware/types/events.go b/x/transfermiddleware/types/events.go index 4509e190f..61938b986 100644 --- a/x/transfermiddleware/types/events.go +++ b/x/transfermiddleware/types/events.go @@ -10,4 +10,5 @@ const ( AttributeKeyIbcDenom = "ibc-denom" AttributeKeyAssetID = "asset-id" AttributeKeyRlyAdress = "rly-address" + AttributeKeyRemoveTime = "remove_time" ) diff --git a/x/transfermiddleware/types/genesis.go b/x/transfermiddleware/types/genesis.go index 24d380e4a..d33a1a2f8 100644 --- a/x/transfermiddleware/types/genesis.go +++ b/x/transfermiddleware/types/genesis.go @@ -6,7 +6,9 @@ import ( // DefaultGenesisState returns a GenesisState with "transfer" as the default PortID. func DefaultGenesisState() *GenesisState { - return &GenesisState{} + return &GenesisState{ + Params: DefaultParams(), + } } // ValidateGenesis validates the provided genesis state to ensure the diff --git a/x/transfermiddleware/types/genesis.pb.go b/x/transfermiddleware/types/genesis.pb.go index 6d93416fa..15a12add3 100644 --- a/x/transfermiddleware/types/genesis.pb.go +++ b/x/transfermiddleware/types/genesis.pb.go @@ -27,6 +27,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // initialized type GenesisState struct { TokenInfos []ParachainIBCTokenInfo `protobuf:"bytes,1,rep,name=token_infos,json=tokenInfos,proto3" json:"token_infos"` + Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"` } func (m *GenesisState) Reset() { *m = GenesisState{} } @@ -69,82 +70,15 @@ func (m *GenesisState) GetTokenInfos() []ParachainIBCTokenInfo { return nil } -// ParachainIBCTokenInfo represents information about transferable IBC tokens from Parachain. -type ParachainIBCTokenInfo struct { - // ibc_denom is the denomination of the ibced token transferred from the dotsama chain. 
- IbcDenom string `protobuf:"bytes,1,opt,name=ibc_denom,json=ibcDenom,proto3" json:"ibc_denom,omitempty" yaml:"ibc_denom"` - // channel_id is source channel in IBC connection from centauri chain - ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"` - // native denom is new native minted denom in centauri chain. - NativeDenom string `protobuf:"bytes,3,opt,name=native_denom,json=nativeDenom,proto3" json:"native_denom,omitempty" yaml:"native_denom"` - // asset id is the id of the asset on Picasso - AssetId string `protobuf:"bytes,4,opt,name=asset_id,json=assetId,proto3" json:"asset_id,omitempty" yaml:"asset_id"` -} - -func (m *ParachainIBCTokenInfo) Reset() { *m = ParachainIBCTokenInfo{} } -func (m *ParachainIBCTokenInfo) String() string { return proto.CompactTextString(m) } -func (*ParachainIBCTokenInfo) ProtoMessage() {} -func (*ParachainIBCTokenInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_bba390f1246595d3, []int{1} -} -func (m *ParachainIBCTokenInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ParachainIBCTokenInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ParachainIBCTokenInfo.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ParachainIBCTokenInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_ParachainIBCTokenInfo.Merge(m, src) -} -func (m *ParachainIBCTokenInfo) XXX_Size() int { - return m.Size() -} -func (m *ParachainIBCTokenInfo) XXX_DiscardUnknown() { - xxx_messageInfo_ParachainIBCTokenInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_ParachainIBCTokenInfo proto.InternalMessageInfo - -func (m *ParachainIBCTokenInfo) GetIbcDenom() string { - if m != nil { - return m.IbcDenom - } - return "" -} - -func (m *ParachainIBCTokenInfo) GetChannelId() string { +func (m *GenesisState) GetParams() Params { if m != nil { - return m.ChannelId + return m.Params } - return "" -} - -func (m *ParachainIBCTokenInfo) GetNativeDenom() string { - if m != nil { - return m.NativeDenom - } - return "" -} - -func (m *ParachainIBCTokenInfo) GetAssetId() string { - if m != nil { - return m.AssetId - } - return "" + return Params{} } func init() { proto.RegisterType((*GenesisState)(nil), "centauri.transfermiddleware.v1beta1.GenesisState") - proto.RegisterType((*ParachainIBCTokenInfo)(nil), "centauri.transfermiddleware.v1beta1.ParachainIBCTokenInfo") } func init() { @@ -152,29 +86,23 @@ func init() { } var fileDescriptor_bba390f1246595d3 = []byte{ - // 347 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0x41, 0x4f, 0xea, 0x40, - 0x14, 0x85, 0xdb, 0x07, 0x79, 0x0f, 0x06, 0x92, 0xa7, 0x05, 0x22, 0x61, 0xd1, 0x92, 0x71, 0xc3, - 0xaa, 0x4d, 0x95, 0x15, 0xcb, 0x6a, 0x62, 0xba, 0x33, 0xd5, 0x95, 0x1b, 0x32, 0x6d, 0x2f, 0x30, - 0x11, 0x66, 0xb0, 0x33, 0xa2, 0xfc, 0x0b, 0x7f, 0x16, 0x4b, 0x96, 0xae, 0x1a, 0x03, 0x5b, 0x57, - 0xfd, 0x05, 0xa6, 0x1d, 0x50, 0x13, 0x59, 0xb8, 0x3b, 0xf7, 0x9e, 0xfb, 0xdd, 0xb3, 0x38, 0xc8, - 0x8d, 0x80, 0x49, 0xf2, 0x98, 0x50, 0x47, 0x26, 0x84, 0x89, 0x11, 0x24, 0x33, 0x1a, 0xc7, 0x53, - 0x78, 0x22, 0x09, 0x38, 0x0b, 0x37, 0x04, 0x49, 0x5c, 0x67, 0x0c, 0x0c, 0x04, 0x15, 0xf6, 0x3c, - 0xe1, 0x92, 0x1b, 0xa7, 0x7b, 0xc4, 0xfe, 0x89, 0xd8, 0x3b, 0xa4, 0xd3, 0x1c, 0xf3, 0x31, 0x2f, - 0xee, 0x9d, 0x5c, 0x29, 0x14, 0x3f, 0xa0, 0xfa, 
0x95, 0xfa, 0x75, 0x23, 0x89, 0x04, 0x83, 0xa0, - 0x9a, 0xe4, 0xf7, 0xc0, 0x86, 0x94, 0x8d, 0xb8, 0x68, 0xeb, 0xdd, 0x52, 0xaf, 0x76, 0x36, 0xb0, - 0x7f, 0x11, 0x60, 0x5f, 0x93, 0x84, 0x44, 0x13, 0x42, 0x99, 0xef, 0x5d, 0xdc, 0xe6, 0x3f, 0x7c, - 0x36, 0xe2, 0x5e, 0x79, 0x95, 0x5a, 0x5a, 0x80, 0xe4, 0x7e, 0x21, 0xf0, 0xbb, 0x8e, 0x5a, 0x07, - 0x6f, 0x0d, 0x17, 0x55, 0x69, 0x18, 0x0d, 0x63, 0x60, 0x7c, 0xd6, 0xd6, 0xbb, 0x7a, 0xaf, 0xea, - 0x35, 0xb3, 0xd4, 0x3a, 0x5a, 0x92, 0xd9, 0x74, 0x80, 0x3f, 0x2d, 0x1c, 0x54, 0x68, 0x18, 0x5d, - 0xe6, 0xd2, 0xe8, 0x23, 0x14, 0x4d, 0x08, 0x63, 0x30, 0x1d, 0xd2, 0xb8, 0xfd, 0xa7, 0x60, 0x5a, - 0x59, 0x6a, 0x1d, 0x2b, 0xe6, 0xcb, 0xc3, 0x41, 0x75, 0x37, 0xf8, 0xb1, 0x31, 0x40, 0x75, 0x46, - 0x24, 0x5d, 0xc0, 0x2e, 0xab, 0x54, 0x70, 0x27, 0x59, 0x6a, 0x35, 0x14, 0xf7, 0xdd, 0xc5, 0x41, - 0x4d, 0x8d, 0x2a, 0xd1, 0x46, 0x15, 0x22, 0x04, 0xc8, 0x3c, 0xaf, 0x5c, 0x70, 0x8d, 0x2c, 0xb5, - 0xfe, 0x2b, 0x6e, 0xef, 0xe0, 0xe0, 0x5f, 0x21, 0xfd, 0xd8, 0xeb, 0xaf, 0x36, 0xa6, 0xbe, 0xde, - 0x98, 0xfa, 0xdb, 0xc6, 0xd4, 0x5f, 0xb6, 0xa6, 0xb6, 0xde, 0x9a, 0xda, 0xeb, 0xd6, 0xd4, 0xee, - 0x3a, 0xcf, 0x87, 0x2a, 0x96, 0xcb, 0x39, 0x88, 0xf0, 0x6f, 0x51, 0xcf, 0xf9, 0x47, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x1d, 0x03, 0xda, 0x49, 0x0e, 0x02, 0x00, 0x00, + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4c, 0x4e, 0xcd, 0x2b, + 0x49, 0x2c, 0x2d, 0xca, 0xd4, 0x2f, 0x29, 0x4a, 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xca, 0xcd, 0x4c, + 0x49, 0xc9, 0x49, 0x2d, 0x4f, 0x2c, 0x4a, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, + 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x52, 0x86, + 0x69, 0xd1, 0xc3, 0xd4, 0xa2, 0x07, 0xd5, 0x22, 0x25, 0x92, 0x9e, 0x9f, 0x9e, 0x0f, 0x56, 0xaf, + 0x0f, 0x62, 0x41, 0xb4, 0x4a, 0x19, 0x10, 0x63, 0x5b, 0x41, 0x62, 0x51, 0x62, 0x2e, 0xd4, 0x32, + 0x29, 0x3b, 0x62, 0x75, 0x24, 0x67, 0x24, 0x66, 0xe6, 0xc5, 0x97, 0xe4, 0x67, 0xa7, 0xe6, 0xc5, + 0x67, 0xe6, 0xa5, 0x41, 0x6d, 0x54, 0xda, 0xc3, 0xc8, 0xc5, 0xe3, 0x0e, 0x71, 0x7e, 0x70, 0x49, + 0x62, 0x49, 0xaa, 0x50, 0x22, 0x17, 0x37, 0x42, 0x51, 0xb1, 0x04, 0xa3, 0x02, 0xb3, 0x06, 0xb7, + 0x91, 0x95, 0x1e, 0x11, 0x7e, 0xd2, 0x0b, 0x80, 0x59, 0xe3, 0xe9, 0xe4, 0x1c, 0x02, 0x32, 0xc3, + 0x33, 0x2f, 0x2d, 0xdf, 0x89, 0xe5, 0xc4, 0x3d, 0x79, 0x86, 0x20, 0xae, 0x12, 0x98, 0x40, 0xb1, + 0x90, 0x27, 0x17, 0x1b, 0xc4, 0x0f, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xdc, 0x46, 0xda, 0x44, 0x9b, + 0x9e, 0x5b, 0x0c, 0x35, 0x0e, 0x6a, 0x80, 0x93, 0xc9, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, + 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, + 0xcb, 0x31, 0x44, 0x49, 0x55, 0x60, 0x0b, 0x91, 0x92, 0xca, 0x82, 0xd4, 0xe2, 0x24, 0x36, 0xb0, + 0xdf, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xaf, 0xbd, 0x63, 0x88, 0xdd, 0x01, 0x00, 0x00, } func (m *GenesisState) Marshal() (dAtA []byte, err error) { @@ -197,6 +125,16 @@ func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 if len(m.TokenInfos) > 0 { for iNdEx := len(m.TokenInfos) - 1; iNdEx >= 0; iNdEx-- { { @@ -214,57 +152,6 @@ func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *ParachainIBCTokenInfo) Marshal() (dAtA []byte, err error) { - size 
:= m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ParachainIBCTokenInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ParachainIBCTokenInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.AssetId) > 0 { - i -= len(m.AssetId) - copy(dAtA[i:], m.AssetId) - i = encodeVarintGenesis(dAtA, i, uint64(len(m.AssetId))) - i-- - dAtA[i] = 0x22 - } - if len(m.NativeDenom) > 0 { - i -= len(m.NativeDenom) - copy(dAtA[i:], m.NativeDenom) - i = encodeVarintGenesis(dAtA, i, uint64(len(m.NativeDenom))) - i-- - dAtA[i] = 0x1a - } - if len(m.ChannelId) > 0 { - i -= len(m.ChannelId) - copy(dAtA[i:], m.ChannelId) - i = encodeVarintGenesis(dAtA, i, uint64(len(m.ChannelId))) - i-- - dAtA[i] = 0x12 - } - if len(m.IbcDenom) > 0 { - i -= len(m.IbcDenom) - copy(dAtA[i:], m.IbcDenom) - i = encodeVarintGenesis(dAtA, i, uint64(len(m.IbcDenom))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { offset -= sovGenesis(v) base := offset @@ -288,31 +175,8 @@ func (m *GenesisState) Size() (n int) { n += 1 + l + sovGenesis(uint64(l)) } } - return n -} - -func (m *ParachainIBCTokenInfo) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.IbcDenom) - if l > 0 { - n += 1 + l + sovGenesis(uint64(l)) - } - l = len(m.ChannelId) - if l > 0 { - n += 1 + l + sovGenesis(uint64(l)) - } - l = len(m.NativeDenom) - if l > 0 { - n += 1 + l + sovGenesis(uint64(l)) - } - l = len(m.AssetId) - if l > 0 { - n += 1 + l + sovGenesis(uint64(l)) - } + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) return n } @@ -385,125 +249,11 @@ func (m *GenesisState) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenesis - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ParachainIBCTokenInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ParachainIBCTokenInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ParachainIBCTokenInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IbcDenom", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } 
- if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IbcDenom = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ChannelId = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NativeDenom", wireType) - } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenesis @@ -513,55 +263,24 @@ func (m *ParachainIBCTokenInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenesis } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenesis } if postIndex > l { return io.ErrUnexpectedEOF } - m.NativeDenom = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AssetId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenesis - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenesis - } - if postIndex > l { - return io.ErrUnexpectedEOF + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - m.AssetId = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex diff --git a/x/transfermiddleware/types/info_test.go b/x/transfermiddleware/types/info_test.go index 2823385b0..52010caa0 100644 --- a/x/transfermiddleware/types/info_test.go +++ b/x/transfermiddleware/types/info_test.go @@ -3,7 +3,7 @@ package types_test import ( "testing" - "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + "github.com/notional-labs/centauri/v4/x/transfermiddleware/types" "github.com/stretchr/testify/require" ) diff --git a/x/transfermiddleware/types/keys.go b/x/transfermiddleware/types/keys.go index edf8720c9..6a1b8b9e8 100644 --- a/x/transfermiddleware/types/keys.go +++ b/x/transfermiddleware/types/keys.go @@ -18,10 +18,11 @@ const ( ) var ( - KeyParachainIBCTokenInfoByNativeDenom = []byte{0x01} - KeyParachainIBCTokenInfoByAssetID = []byte{0x02} - KeyIBCDenomAndNativeIndex = []byte{0x03} - KeyRlyAddress = []byte{0x04} + KeyParachainIBCTokenInfoByNativeDenom = []byte{0x01} + KeyParachainIBCTokenInfoByAssetID = []byte{0x02} + KeyIBCDenomAndNativeIndex = []byte{0x03} + KeyRlyAddress = []byte{0x04} + KeyParachainIBCTokenRemoveListByNativeDenom = 
[]byte{0x05} ) func GetKeyParachainIBCTokenInfoByNativeDenom(nativeDenom string) []byte { @@ -39,3 +40,7 @@ func GetKeyNativeDenomAndIbcSecondaryIndex(ibcDenom string) []byte { func GetKeyByRlyAddress(rlyAddress string) []byte { return append(KeyRlyAddress, []byte(rlyAddress)...) } + +func GetKeyParachainIBCTokenRemoveListByNativeDenom(nativeDenom string) []byte { + return append(KeyParachainIBCTokenRemoveListByNativeDenom, []byte(nativeDenom)...) +} diff --git a/x/transfermiddleware/types/parachain_token_info.pb.go b/x/transfermiddleware/types/parachain_token_info.pb.go new file mode 100644 index 000000000..effd9f8de --- /dev/null +++ b/x/transfermiddleware/types/parachain_token_info.pb.go @@ -0,0 +1,719 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: centauri/transfermiddleware/v1beta1/parachain_token_info.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + _ "google.golang.org/protobuf/types/known/timestamppb" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ParachainIBCTokenInfo represents information about transferable IBC tokens +// from Parachain. +type ParachainIBCTokenInfo struct { + // ibc_denom is the denomination of the ibced token transferred from the + // dotsama chain. + IbcDenom string `protobuf:"bytes,1,opt,name=ibc_denom,json=ibcDenom,proto3" json:"ibc_denom,omitempty" yaml:"ibc_denom"` + // channel_id is source channel in IBC connection from centauri chain + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"` + // native denom is new native minted denom in centauri chain. 
+ NativeDenom string `protobuf:"bytes,3,opt,name=native_denom,json=nativeDenom,proto3" json:"native_denom,omitempty" yaml:"native_denom"` + // asset id is the id of the asset on Picasso + AssetId string `protobuf:"bytes,4,opt,name=asset_id,json=assetId,proto3" json:"asset_id,omitempty" yaml:"asset_id"` +} + +func (m *ParachainIBCTokenInfo) Reset() { *m = ParachainIBCTokenInfo{} } +func (m *ParachainIBCTokenInfo) String() string { return proto.CompactTextString(m) } +func (*ParachainIBCTokenInfo) ProtoMessage() {} +func (*ParachainIBCTokenInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_b9cc8ea0d211210f, []int{0} +} +func (m *ParachainIBCTokenInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParachainIBCTokenInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ParachainIBCTokenInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ParachainIBCTokenInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParachainIBCTokenInfo.Merge(m, src) +} +func (m *ParachainIBCTokenInfo) XXX_Size() int { + return m.Size() +} +func (m *ParachainIBCTokenInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ParachainIBCTokenInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ParachainIBCTokenInfo proto.InternalMessageInfo + +func (m *ParachainIBCTokenInfo) GetIbcDenom() string { + if m != nil { + return m.IbcDenom + } + return "" +} + +func (m *ParachainIBCTokenInfo) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *ParachainIBCTokenInfo) GetNativeDenom() string { + if m != nil { + return m.NativeDenom + } + return "" +} + +func (m *ParachainIBCTokenInfo) GetAssetId() string { + if m != nil { + return m.AssetId + } + return "" +} + +type RemoveParachainIBCTokenInfo struct { + // native denom is new native minted denom in centauri chain. + NativeDenom string `protobuf:"bytes,1,opt,name=native_denom,json=nativeDenom,proto3" json:"native_denom,omitempty" yaml:"native_denom"` + // remove_time is the time at which the parachain token info will be removed. 
+ RemoveTime time.Time `protobuf:"bytes,2,opt,name=remove_time,json=removeTime,proto3,stdtime" json:"remove_time" yaml:"start_time"` +} + +func (m *RemoveParachainIBCTokenInfo) Reset() { *m = RemoveParachainIBCTokenInfo{} } +func (m *RemoveParachainIBCTokenInfo) String() string { return proto.CompactTextString(m) } +func (*RemoveParachainIBCTokenInfo) ProtoMessage() {} +func (*RemoveParachainIBCTokenInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_b9cc8ea0d211210f, []int{1} +} +func (m *RemoveParachainIBCTokenInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RemoveParachainIBCTokenInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RemoveParachainIBCTokenInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RemoveParachainIBCTokenInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_RemoveParachainIBCTokenInfo.Merge(m, src) +} +func (m *RemoveParachainIBCTokenInfo) XXX_Size() int { + return m.Size() +} +func (m *RemoveParachainIBCTokenInfo) XXX_DiscardUnknown() { + xxx_messageInfo_RemoveParachainIBCTokenInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_RemoveParachainIBCTokenInfo proto.InternalMessageInfo + +func (m *RemoveParachainIBCTokenInfo) GetNativeDenom() string { + if m != nil { + return m.NativeDenom + } + return "" +} + +func (m *RemoveParachainIBCTokenInfo) GetRemoveTime() time.Time { + if m != nil { + return m.RemoveTime + } + return time.Time{} +} + +func init() { + proto.RegisterType((*ParachainIBCTokenInfo)(nil), "centauri.transfermiddleware.v1beta1.ParachainIBCTokenInfo") + proto.RegisterType((*RemoveParachainIBCTokenInfo)(nil), "centauri.transfermiddleware.v1beta1.RemoveParachainIBCTokenInfo") +} + +func init() { + proto.RegisterFile("centauri/transfermiddleware/v1beta1/parachain_token_info.proto", fileDescriptor_b9cc8ea0d211210f) +} + +var fileDescriptor_b9cc8ea0d211210f = []byte{ + // 392 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0xbf, 0xae, 0xd3, 0x30, + 0x14, 0xc6, 0x63, 0x40, 0x70, 0xeb, 0x22, 0x01, 0xb9, 0xad, 0xa8, 0x82, 0x48, 0x90, 0x59, 0x98, + 0x12, 0x15, 0x3a, 0x75, 0x60, 0x08, 0x2c, 0xd9, 0x50, 0xd4, 0xa9, 0x4b, 0xe4, 0x24, 0x4e, 0x6a, + 0x91, 0xd8, 0x91, 0xe3, 0x16, 0xfa, 0x16, 0x7d, 0x19, 0xde, 0xa1, 0x63, 0x47, 0xa6, 0x80, 0xda, + 0x95, 0x29, 0x4f, 0x80, 0x62, 0x37, 0x14, 0x41, 0x97, 0xbb, 0x9d, 0xa3, 0xef, 0xfc, 0xfc, 0x9d, + 0x3f, 0x86, 0xef, 0x13, 0xc2, 0x24, 0x5e, 0x0b, 0xea, 0x49, 0x81, 0x59, 0x9d, 0x11, 0x51, 0xd2, + 0x34, 0x2d, 0xc8, 0x17, 0x2c, 0x88, 0xb7, 0x99, 0xc6, 0x44, 0xe2, 0xa9, 0x57, 0x61, 0x81, 0x93, + 0x15, 0xa6, 0x2c, 0x92, 0xfc, 0x33, 0x61, 0x11, 0x65, 0x19, 0x77, 0x2b, 0xc1, 0x25, 0x37, 0x5f, + 0xf7, 0xbc, 0xfb, 0x3f, 0xef, 0x9e, 0x79, 0x6b, 0x94, 0xf3, 0x9c, 0xab, 0x7a, 0xaf, 0x8b, 0x34, + 0x6a, 0x39, 0x39, 0xe7, 0x79, 0x41, 0x3c, 0x95, 0xc5, 0xeb, 0xcc, 0x93, 0xb4, 0x24, 0xb5, 0xc4, + 0x65, 0xa5, 0x0b, 0xd0, 0x2f, 0x00, 0xc7, 0x9f, 0x7a, 0xeb, 0xc0, 0xff, 0xb0, 0xe8, 0xcc, 0x03, + 0x96, 0x71, 0x73, 0x0a, 0x07, 0x34, 0x4e, 0xa2, 0x94, 0x30, 0x5e, 0x4e, 0xc0, 0x2b, 0xf0, 0x66, + 0xe0, 0x8f, 0xda, 0xc6, 0x79, 0xba, 0xc5, 0x65, 0x31, 0x47, 0x7f, 0x24, 0x14, 0xde, 0xd0, 0x38, + 0xf9, 0xd8, 0x85, 0xe6, 0x0c, 0xc2, 0x64, 0x85, 0x19, 0x23, 0x45, 0x44, 0xd3, 0xc9, 0x3d, 0xc5, + 0x8c, 0xdb, 0xc6, 0x79, 0xa6, 0x99, 0x8b, 0x86, 0xc2, 0xc1, 0x39, 0x09, 0x52, 
0x73, 0x0e, 0x1f, + 0x33, 0x2c, 0xe9, 0x86, 0x9c, 0xbd, 0xee, 0x2b, 0xee, 0x79, 0xdb, 0x38, 0xb7, 0x9a, 0xfb, 0x5b, + 0x45, 0xe1, 0x50, 0xa7, 0xda, 0xd1, 0x85, 0x37, 0xb8, 0xae, 0x89, 0xec, 0xfc, 0x1e, 0x28, 0xee, + 0xb6, 0x6d, 0x9c, 0x27, 0x9a, 0xeb, 0x15, 0x14, 0x3e, 0x52, 0x61, 0x90, 0xa2, 0x6f, 0x00, 0xbe, + 0x08, 0x49, 0xc9, 0x37, 0xe4, 0xfa, 0xd0, 0xff, 0xf6, 0x02, 0xee, 0xd0, 0xcb, 0x12, 0x0e, 0x85, + 0x7a, 0x3a, 0xea, 0x96, 0xac, 0xc6, 0x1f, 0xbe, 0xb5, 0x5c, 0x7d, 0x01, 0xb7, 0xbf, 0x80, 0xbb, + 0xe8, 0x2f, 0xe0, 0xbf, 0xdc, 0x37, 0x8e, 0x71, 0x59, 0x4f, 0x2d, 0xb1, 0x90, 0x8a, 0x45, 0xbb, + 0x1f, 0x0e, 0x08, 0xa1, 0x7e, 0xad, 0xab, 0xf7, 0x67, 0xfb, 0xa3, 0x0d, 0x0e, 0x47, 0x1b, 0xfc, + 0x3c, 0xda, 0x60, 0x77, 0xb2, 0x8d, 0xc3, 0xc9, 0x36, 0xbe, 0x9f, 0x6c, 0x63, 0x69, 0x7d, 0xbd, + 0xf6, 0xab, 0xe4, 0xb6, 0x22, 0x75, 0xfc, 0x50, 0x99, 0xbe, 0xfb, 0x1d, 0x00, 0x00, 0xff, 0xff, + 0xd7, 0x2a, 0x55, 0xb9, 0x81, 0x02, 0x00, 0x00, +} + +func (m *ParachainIBCTokenInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParachainIBCTokenInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParachainIBCTokenInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AssetId) > 0 { + i -= len(m.AssetId) + copy(dAtA[i:], m.AssetId) + i = encodeVarintParachainTokenInfo(dAtA, i, uint64(len(m.AssetId))) + i-- + dAtA[i] = 0x22 + } + if len(m.NativeDenom) > 0 { + i -= len(m.NativeDenom) + copy(dAtA[i:], m.NativeDenom) + i = encodeVarintParachainTokenInfo(dAtA, i, uint64(len(m.NativeDenom))) + i-- + dAtA[i] = 0x1a + } + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintParachainTokenInfo(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.IbcDenom) > 0 { + i -= len(m.IbcDenom) + copy(dAtA[i:], m.IbcDenom) + i = encodeVarintParachainTokenInfo(dAtA, i, uint64(len(m.IbcDenom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RemoveParachainIBCTokenInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RemoveParachainIBCTokenInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RemoveParachainIBCTokenInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n1, err1 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.RemoveTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.RemoveTime):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintParachainTokenInfo(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x12 + if len(m.NativeDenom) > 0 { + i -= len(m.NativeDenom) + copy(dAtA[i:], m.NativeDenom) + i = encodeVarintParachainTokenInfo(dAtA, i, uint64(len(m.NativeDenom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintParachainTokenInfo(dAtA []byte, offset int, v uint64) int { + offset -= sovParachainTokenInfo(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ParachainIBCTokenInfo) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = len(m.IbcDenom) + if l > 0 { + n += 1 + l + sovParachainTokenInfo(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovParachainTokenInfo(uint64(l)) + } + l = len(m.NativeDenom) + if l > 0 { + n += 1 + l + sovParachainTokenInfo(uint64(l)) + } + l = len(m.AssetId) + if l > 0 { + n += 1 + l + sovParachainTokenInfo(uint64(l)) + } + return n +} + +func (m *RemoveParachainIBCTokenInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.NativeDenom) + if l > 0 { + n += 1 + l + sovParachainTokenInfo(uint64(l)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.RemoveTime) + n += 1 + l + sovParachainTokenInfo(uint64(l)) + return n +} + +func sovParachainTokenInfo(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParachainTokenInfo(x uint64) (n int) { + return sovParachainTokenInfo(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ParachainIBCTokenInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParachainTokenInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParachainIBCTokenInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParachainIBCTokenInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IbcDenom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParachainTokenInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParachainTokenInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParachainTokenInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IbcDenom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParachainTokenInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParachainTokenInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParachainTokenInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NativeDenom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParachainTokenInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 
0 { + return ErrInvalidLengthParachainTokenInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParachainTokenInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NativeDenom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AssetId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParachainTokenInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParachainTokenInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParachainTokenInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AssetId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParachainTokenInfo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParachainTokenInfo + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RemoveParachainIBCTokenInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParachainTokenInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RemoveParachainIBCTokenInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RemoveParachainIBCTokenInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NativeDenom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParachainTokenInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthParachainTokenInfo + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthParachainTokenInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NativeDenom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RemoveTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParachainTokenInfo + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParachainTokenInfo + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParachainTokenInfo + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.RemoveTime, dAtA[iNdEx:postIndex]); err != nil { + return err + 
} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParachainTokenInfo(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParachainTokenInfo + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParachainTokenInfo(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParachainTokenInfo + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParachainTokenInfo + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParachainTokenInfo + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParachainTokenInfo + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParachainTokenInfo + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParachainTokenInfo + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParachainTokenInfo = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParachainTokenInfo = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParachainTokenInfo = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/transfermiddleware/types/params.go b/x/transfermiddleware/types/params.go new file mode 100644 index 000000000..5b252f053 --- /dev/null +++ b/x/transfermiddleware/types/params.go @@ -0,0 +1,40 @@ +package types + +import ( + fmt "fmt" + "time" + + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +var KeyRemoveParaTokenDuration = []byte("RemoveParaTokenDuration") + +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +func NewParams(removeDuration time.Duration) Params { + return Params{ + Duration: removeDuration, + } +} + +// DefaultParams is the default parameter configuration for the transfermiddleware module. +func DefaultParams() Params { + return NewParams(168 * time.Hour) // 168h (1 week) +} + +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{ + paramtypes.NewParamSetPair(KeyRemoveParaTokenDuration, &p.Duration, validateTimeDuration), + } +} + +func validateTimeDuration(i interface{}) error { + _, ok := i.(time.Duration) + if !ok { + return fmt.Errorf("invalid parameter type: %T", i) + } + + return nil +} diff --git a/x/transfermiddleware/types/params.pb.go b/x/transfermiddleware/types/params.pb.go new file mode 100644 index 000000000..bac319c24 --- /dev/null +++ b/x/transfermiddleware/types/params.pb.go @@ -0,0 +1,327 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: centauri/transfermiddleware/v1beta1/params.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + _ "google.golang.org/protobuf/types/known/durationpb" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params holds parameters for the mint module. +type Params struct { + Duration time.Duration `protobuf:"bytes,1,opt,name=duration,proto3,stdduration" json:"duration,omitempty" yaml:"duration"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_1f53976bc1ed34da, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetDuration() time.Duration { + if m != nil { + return m.Duration + } + return 0 +} + +func init() { + proto.RegisterType((*Params)(nil), "centauri.transfermiddleware.v1beta1.Params") +} + +func init() { + proto.RegisterFile("centauri/transfermiddleware/v1beta1/params.proto", fileDescriptor_1f53976bc1ed34da) +} + +var fileDescriptor_1f53976bc1ed34da = []byte{ + // 239 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x48, 0x4e, 0xcd, 0x2b, + 0x49, 0x2c, 0x2d, 0xca, 0xd4, 0x2f, 0x29, 0x4a, 0xcc, 0x2b, 0x4e, 0x4b, 0x2d, 0xca, 0xcd, 0x4c, + 0x49, 0xc9, 0x49, 0x2d, 0x4f, 0x2c, 0x4a, 0xd5, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, + 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x52, 0x86, 0xe9, + 0xd0, 0xc3, 0xd4, 0xa1, 0x07, 0xd5, 0x21, 0x25, 0x92, 0x9e, 0x9f, 0x9e, 0x0f, 0x56, 0xaf, 0x0f, + 0x62, 0x41, 0xb4, 0x4a, 0xc9, 0xa5, 0xe7, 0xe7, 0xa7, 0xe7, 0xa4, 0xea, 0x83, 0x79, 0x49, 0xa5, + 0x69, 0xfa, 0x29, 0xa5, 0x45, 0x89, 0x25, 0x99, 0xf9, 0x79, 0x10, 0x79, 0xa5, 0x22, 0x2e, 0xb6, + 0x00, 0xb0, 0x55, 0x42, 0x19, 0x5c, 0x1c, 0x30, 0x39, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x6e, 0x23, + 0x49, 0x3d, 0x88, 0x66, 0x3d, 0x98, 0x66, 0x3d, 0x17, 0xa8, 0x02, 0x27, 0xc3, 0x13, 0xf7, 0xe4, + 0x19, 0x5e, 0xdd, 0x93, 0x17, 0x82, 0x69, 0xd1, 0xc9, 0xcf, 0xcd, 0x2c, 0x49, 0xcd, 0x2d, 0x28, + 0xa9, 0xfc, 0x74, 0x4f, 0x9e, 0xbf, 0x32, 0x31, 0x37, 0xc7, 0x4a, 0x09, 0x26, 0xa7, 0x34, 0xe3, + 0xbe, 0x3c, 0x63, 0x10, 0xdc, 0x74, 0x27, 0x93, 0x13, 
0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, + 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, + 0x63, 0x88, 0x92, 0xaa, 0xc0, 0x16, 0x26, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x49, 0x6c, 0x60, 0x57, + 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa1, 0x85, 0xdf, 0xf3, 0x3f, 0x01, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n1, err1 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.Duration, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintParams(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration) + n += 1 + l + sovParams(uint64(l)) + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdDurationUnmarshal(&m.Duration, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/transfermiddleware/types/query.pb.go b/x/transfermiddleware/types/query.pb.go index 8d68990cc..49ac2c3d0 100644 --- a/x/transfermiddleware/types/query.pb.go +++ b/x/transfermiddleware/types/query.pb.go @@ -119,7 +119,8 @@ func (m *QueryEscrowAddressResponse) GetEscrowAddress() string { return "" } -// QueryParaTokenInfoRequest is the request type for the Query/Params RPC method. +// QueryParaTokenInfoRequest is the request type for the Query/Params RPC +// method. type QueryParaTokenInfoRequest struct { NativeDenom string `protobuf:"bytes,1,opt,name=native_denom,json=nativeDenom,proto3" json:"native_denom,omitempty" yaml:"native_denom"` } @@ -164,7 +165,8 @@ func (m *QueryParaTokenInfoRequest) GetNativeDenom() string { return "" } -// QueryParaTokenInfoResponse is the response type for the Query/ParaTokenInfo RPC method. +// QueryParaTokenInfoResponse is the response type for the Query/ParaTokenInfo +// RPC method. type QueryParaTokenInfoResponse struct { IbcDenom string `protobuf:"bytes,1,opt,name=ibc_denom,json=ibcDenom,proto3" json:"ibc_denom,omitempty" yaml:"ibc_denom"` ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"` diff --git a/x/transfermiddleware/types/tx.pb.go b/x/transfermiddleware/types/tx.pb.go index d4f731e50..ff5a1ef70 100644 --- a/x/transfermiddleware/types/tx.pb.go +++ b/x/transfermiddleware/types/tx.pb.go @@ -31,7 +31,8 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // MsgAddParachainInfo represents a message to add new parachain info. type MsgAddParachainIBCTokenInfo struct { - // authority is the address that controls the module (defaults to x/gov unless overwritten). + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). 
Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty" yaml:"authority"` ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty" yaml:"channel_id"` IbcDenom string `protobuf:"bytes,3,opt,name=ibc_denom,json=ibcDenom,proto3" json:"ibc_denom,omitempty" yaml:"ibc_denom"` @@ -143,9 +144,11 @@ func (m *MsgAddParachainIBCTokenInfoResponse) XXX_DiscardUnknown() { var xxx_messageInfo_MsgAddParachainIBCTokenInfoResponse proto.InternalMessageInfo -// MsgRemoveParachainIBCTokenInfo represents a message to remove new parachain info. +// MsgRemoveParachainIBCTokenInfo represents a message to remove new parachain +// info. type MsgRemoveParachainIBCTokenInfo struct { - // authority is the address that controls the module (defaults to x/gov unless overwritten). + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty" yaml:"authority"` NativeDenom string `protobuf:"bytes,2,opt,name=native_denom,json=nativeDenom,proto3" json:"native_denom,omitempty" yaml:"ibc_denom"` } @@ -237,7 +240,8 @@ var xxx_messageInfo_MsgRemoveParachainIBCTokenInfoResponse proto.InternalMessage // MsgAddRlyAddress represents a message to add new rly address to allow list type MsgAddRlyAddress struct { - // authority is the address that controls the module (defaults to x/gov unless overwritten). + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty" yaml:"authority"` RlyAddress string `protobuf:"bytes,2,opt,name=rly_address,json=rlyAddress,proto3" json:"rly_address,omitempty" yaml:"rly_address"` }