diff --git a/app/app.go b/app/app.go index 3c84a62d6..e1cd0e2ec 100644 --- a/app/app.go +++ b/app/app.go @@ -94,6 +94,9 @@ import ( transfermiddleware "github.com/notional-labs/centauri/v3/x/transfermiddleware" transfermiddlewaretypes "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + ratelimitmodule "github.com/notional-labs/centauri/v3/x/ratelimit" + ratelimitmoduletypes "github.com/notional-labs/centauri/v3/x/ratelimit/types" + consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types" "github.com/notional-labs/centauri/v3/x/mint" @@ -196,6 +199,7 @@ var ( router.AppModuleBasic{}, ibc_hooks.AppModuleBasic{}, transfermiddleware.AppModuleBasic{}, + ratelimitmodule.AppModuleBasic{}, consensus.AppModuleBasic{}, alliancemodule.AppModuleBasic{}, // this line is used by starport scaffolding # stargate/app/moduleBasic @@ -308,6 +312,7 @@ func NewCentauriApp( transferModule := transfer.NewAppModule(app.TransferKeeper) routerModule := router.NewAppModule(app.RouterKeeper) transfermiddlewareModule := transfermiddleware.NewAppModule(&app.TransferMiddlewareKeeper) + ratelimitModule := ratelimitmodule.NewAppModule(&app.RatelimitKeeper) icqModule := icq.NewAppModule(app.ICQKeeper) ibcHooksModule := ibc_hooks.NewAppModule() /**** Module Options ****/ @@ -348,6 +353,7 @@ func NewCentauriApp( wasm.NewAppModule(appCodec, &app.WasmKeeper, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, app.MsgServiceRouter(), app.GetSubspace(wasmtypes.ModuleName)), routerModule, transfermiddlewareModule, + ratelimitModule, alliancemodule.NewAppModule(appCodec, app.AllianceKeeper, app.StakingKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), // this line is used by starport scaffolding # stargate/app/appModule ) @@ -369,6 +375,7 @@ func NewCentauriApp( ibctransfertypes.ModuleName, routertypes.ModuleName, transfermiddlewaretypes.ModuleName, + ratelimitmoduletypes.ModuleName, ibchookstypes.ModuleName, icqtypes.ModuleName, authtypes.ModuleName, @@ -406,6 +413,7 @@ func NewCentauriApp( ibchost.ModuleName, routertypes.ModuleName, transfermiddlewaretypes.ModuleName, + ratelimitmoduletypes.ModuleName, ibchookstypes.ModuleName, ibctransfertypes.ModuleName, icqtypes.ModuleName, @@ -440,6 +448,7 @@ func NewCentauriApp( icqtypes.ModuleName, routertypes.ModuleName, transfermiddlewaretypes.ModuleName, + ratelimitmoduletypes.ModuleName, ibchookstypes.ModuleName, feegrant.ModuleName, group.ModuleName, diff --git a/app/ibctesting/chain.go b/app/ibctesting/chain.go index df4981dcf..2619ccf4b 100644 --- a/app/ibctesting/chain.go +++ b/app/ibctesting/chain.go @@ -51,6 +51,7 @@ import ( ibctestingtypes "github.com/cosmos/ibc-go/v7/testing/types" centauri "github.com/notional-labs/centauri/v3/app" "github.com/notional-labs/centauri/v3/app/ibctesting/simapp" + ratelimit "github.com/notional-labs/centauri/v3/x/ratelimit/keeper" routerKeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -122,7 +123,7 @@ func NewTestChain(t *testing.T, coord *Coordinator, chainID string) *TestChain { Coins: sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, amount)), } - app := NewTestingAppDecorator(t, centauri.SetupWithGenesisValSet(t, valSet, []authtypes.GenesisAccount{acc}, balance)) + app := NewTestingAppDecorator(t, centauri.SetupWithGenesisValSet(t, coord.CurrentTime.UTC(), valSet, []authtypes.GenesisAccount{acc}, balance)) // create current header and call begin block header := tmproto.Header{ @@ -616,6 +617,10 @@ 
func (chain *TestChain) TransferMiddleware() routerKeeper.Keeper { return chain.GetTestSupport().TransferMiddleware() } +func (chain *TestChain) RateLimit() ratelimit.Keeper { + return chain.GetTestSupport().RateLimit() +} + func (chain *TestChain) Balance(acc sdk.AccAddress, denom string) sdk.Coin { return chain.GetTestSupport().BankKeeper().GetBalance(chain.GetContext(), acc, denom) } diff --git a/app/keepers/keepers.go b/app/keepers/keepers.go index 7262dfefc..fdf21fce0 100644 --- a/app/keepers/keepers.go +++ b/app/keepers/keepers.go @@ -77,6 +77,10 @@ import ( transfermiddlewarekeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" transfermiddlewaretypes "github.com/notional-labs/centauri/v3/x/transfermiddleware/types" + ratelimitmodule "github.com/notional-labs/centauri/v3/x/ratelimit" + ratelimitmodulekeeper "github.com/notional-labs/centauri/v3/x/ratelimit/keeper" + ratelimitmoduletypes "github.com/notional-labs/centauri/v3/x/ratelimit/types" + consensusparamkeeper "github.com/cosmos/cosmos-sdk/x/consensus/keeper" consensusparamtypes "github.com/cosmos/cosmos-sdk/x/consensus/types" @@ -132,10 +136,12 @@ type AppKeepers struct { ScopedIBCKeeper capabilitykeeper.ScopedKeeper ScopedTransferKeeper capabilitykeeper.ScopedKeeper ScopedWasmKeeper capabilitykeeper.ScopedKeeper + ScopedRateLimitKeeper capabilitykeeper.ScopedKeeper ConsensusParamsKeeper consensusparamkeeper.Keeper // this line is used by starport scaffolding # stargate/app/keeperDeclaration TransferMiddlewareKeeper transfermiddlewarekeeper.Keeper RouterKeeper *routerkeeper.Keeper + RatelimitKeeper ratelimitmodulekeeper.Keeper AllianceKeeper alliancemodulekeeper.Keeper } @@ -224,6 +230,11 @@ func (appKeepers *AppKeepers) InitNormalKeepers( appKeepers.Wasm08Keeper = wasm08Keeper.NewKeeper(appCodec, appKeepers.keys[wasmtypes.StoreKey], authorityAddress, homePath) // Create Transfer Keepers + // * SendPacket. 
Originates from the transferKeeper and goes up the stack: + // transferKeeper.SendPacket -> transfermiddleware.SendPacket -> ibc_rate_limit.SendPacket -> ibc_hooks.SendPacket -> channel.SendPacket + // * RecvPacket, message that originates from core IBC and goes down to app, the flow is the other way + // channel.RecvPacket -> ibc_hooks.OnRecvPacket -> ibc_rate_limit.OnRecvPacket -> forward.OnRecvPacket -> transfermiddleware_OnRecvPacket -> transfer.OnRecvPacket + // hooksKeeper := ibchookskeeper.NewKeeper( appKeepers.keys[ibchookstypes.StoreKey], ) @@ -241,7 +252,7 @@ func (appKeepers *AppKeepers) InitNormalKeepers( appKeepers.keys[transfermiddlewaretypes.StoreKey], appKeepers.GetSubspace(transfermiddlewaretypes.ModuleName), appCodec, - &appKeepers.HooksICS4Wrapper, + &appKeepers.RatelimitKeeper, &appKeepers.TransferKeeper, appKeepers.BankKeeper, authorityAddress, @@ -270,8 +281,19 @@ func (appKeepers *AppKeepers) InitNormalKeepers( appKeepers.IBCKeeper.ChannelKeeper, ) - transferIBCModule := transfer.NewIBCModule(appKeepers.TransferKeeper) + appKeepers.RatelimitKeeper = *ratelimitmodulekeeper.NewKeeper( + appCodec, + appKeepers.keys[ratelimitmoduletypes.StoreKey], + appKeepers.GetSubspace(ratelimitmoduletypes.ModuleName), + appKeepers.BankKeeper, + appKeepers.IBCKeeper.ChannelKeeper, + // TODO: Implement ICS4Wrapper in Records and pass records keeper here + &appKeepers.HooksICS4Wrapper, // ICS4Wrapper + appKeepers.TransferMiddlewareKeeper, + authtypes.NewModuleAddress(govtypes.ModuleName).String(), + ) + transferIBCModule := transfer.NewIBCModule(appKeepers.TransferKeeper) scopedICQKeeper := appKeepers.CapabilityKeeper.ScopeToModule(icqtypes.ModuleName) appKeepers.ICQKeeper = icqkeeper.NewKeeper( @@ -293,8 +315,8 @@ func (appKeepers *AppKeepers) InitNormalKeepers( routerkeeper.DefaultForwardTransferPacketTimeoutTimestamp, routerkeeper.DefaultRefundTransferPacketTimeoutTimestamp, ) - - hooksTransferMiddleware := ibc_hooks.NewIBCMiddleware(ibcMiddlewareStack, &appKeepers.HooksICS4Wrapper) + ratelimitMiddlewareStack := ratelimitmodule.NewIBCMiddleware(appKeepers.RatelimitKeeper, ibcMiddlewareStack) + hooksTransferMiddleware := ibc_hooks.NewIBCMiddleware(ratelimitMiddlewareStack, &appKeepers.HooksICS4Wrapper) // Create evidence Keeper for to register the IBC light client misbehaviour evidence route evidenceKeeper := evidencekeeper.NewKeeper( @@ -392,6 +414,7 @@ func (appKeepers *AppKeepers) InitSpecialKeepers( appKeepers.ScopedIBCKeeper = appKeepers.CapabilityKeeper.ScopeToModule(ibchost.ModuleName) appKeepers.ScopedTransferKeeper = appKeepers.CapabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName) appKeepers.ScopedWasmKeeper = appKeepers.CapabilityKeeper.ScopeToModule(wasm.ModuleName) + appKeepers.ScopedRateLimitKeeper = appKeepers.CapabilityKeeper.ScopeToModule(ratelimitmoduletypes.ModuleName) appKeepers.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, appKeepers.keys[upgradetypes.StoreKey], appCodec, homePath, bApp, authtypes.NewModuleAddress(govtypes.ModuleName).String()) } @@ -410,6 +433,7 @@ func (appKeepers *AppKeepers) initParamsKeeper(appCodec codec.BinaryCodec, legac paramsKeeper.Subspace(minttypes.ModuleName).WithKeyTable(minttypes.ParamKeyTable()) paramsKeeper.Subspace(crisistypes.ModuleName) paramsKeeper.Subspace(ibctransfertypes.ModuleName) + paramsKeeper.Subspace(ratelimitmoduletypes.ModuleName) paramsKeeper.Subspace(icqtypes.ModuleName) paramsKeeper.Subspace(ibchost.ModuleName) paramsKeeper.Subspace(alliancemoduletypes.ModuleName) diff --git 
a/app/keepers/keys.go b/app/keepers/keys.go index df7cf600b..12ecd782c 100644 --- a/app/keepers/keys.go +++ b/app/keepers/keys.go @@ -33,6 +33,7 @@ import ( storetypes "github.com/cosmos/cosmos-sdk/store/types" minttypes "github.com/notional-labs/centauri/v3/x/mint/types" + ratelimitmoduletypes "github.com/notional-labs/centauri/v3/x/ratelimit/types" "github.com/CosmWasm/wasmd/x/wasm" wasm08types "github.com/cosmos/ibc-go/v7/modules/light-clients/08-wasm/types" @@ -47,6 +48,7 @@ func (appKeepers *AppKeepers) GenerateKeys() { govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey, evidencetypes.StoreKey, ibctransfertypes.StoreKey, icqtypes.StoreKey, capabilitytypes.StoreKey, consensusparamtypes.StoreKey, wasm08types.StoreKey, crisistypes.StoreKey, routertypes.StoreKey, transfermiddlewaretypes.StoreKey, group.StoreKey, minttypes.StoreKey, alliancemoduletypes.StoreKey, wasm.StoreKey, ibchookstypes.StoreKey, + ratelimitmoduletypes.StoreKey, ) // Define transient store keys diff --git a/app/test_access.go b/app/test_access.go index bd348cb82..535702163 100644 --- a/app/test_access.go +++ b/app/test_access.go @@ -16,7 +16,8 @@ import ( ibctransferkeeper "github.com/cosmos/ibc-go/v7/modules/apps/transfer/keeper" ibckeeper "github.com/cosmos/ibc-go/v7/modules/core/keeper" wasm08 "github.com/cosmos/ibc-go/v7/modules/light-clients/08-wasm/keeper" - routerKeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" + ratelimitKeeper "github.com/notional-labs/centauri/v3/x/ratelimit/keeper" + tfmdKeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" ) type TestSupport struct { @@ -81,6 +82,10 @@ func (s TestSupport) GetTxConfig() client.TxConfig { return s.app.GetTxConfig() } -func (s TestSupport) TransferMiddleware() routerKeeper.Keeper { +func (s TestSupport) TransferMiddleware() tfmdKeeper.Keeper { return s.app.TransferMiddlewareKeeper } + +func (s TestSupport) RateLimit() ratelimitKeeper.Keeper { + return s.app.RatelimitKeeper +} diff --git a/app/test_helpers.go b/app/test_helpers.go index f9a593a1d..f7719e8d5 100644 --- a/app/test_helpers.go +++ b/app/test_helpers.go @@ -96,6 +96,7 @@ func setup(tb testing.TB, withGenesis bool, invCheckPeriod uint) (*CentauriApp, // account. A Nop logger is set in FeeAbs. 
func SetupWithGenesisValSet( t *testing.T, + ctxTime time.Time, valSet *tmtypes.ValidatorSet, genAccs []authtypes.GenesisAccount, balances ...banktypes.Balance, @@ -159,6 +160,7 @@ func SetupWithGenesisValSet( // init chain will set the validator set and initialize the genesis accounts app.InitChain( abci.RequestInitChain{ + Time: ctxTime, Validators: []abci.ValidatorUpdate{}, ConsensusParams: DefaultConsensusParams, AppStateBytes: stateBytes, diff --git a/go.mod b/go.mod index 9b3f742dd..d2174cad1 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/cosmos/cosmos-sdk v0.47.3 github.com/cosmos/gogoproto v1.4.10 github.com/cosmos/ibc-go/v7 v7.0.1 + github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.3 github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/grpc-gateway v1.16.0 @@ -44,7 +45,6 @@ require ( github.com/cosmos/ics23/go v0.9.1-0.20221207100636-b1abd8678aab // indirect github.com/docker/distribution v2.8.2+incompatible // indirect github.com/go-playground/locales v0.14.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.3.0 // indirect diff --git a/proto/centauri/ratelimit/v1beta1/epoch.proto b/proto/centauri/ratelimit/v1beta1/epoch.proto new file mode 100644 index 000000000..8f0bf2669 --- /dev/null +++ b/proto/centauri/ratelimit/v1beta1/epoch.proto @@ -0,0 +1,65 @@ +syntax = "proto3"; +package centauri.ratelimit.v1beta1; + +import "gogoproto/gogo.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "x/ratelimit/types"; + +message EpochInfo { + // identifier is a unique reference to this particular timer. + string identifier = 1; + // start_time is the time at which the timer first ever ticks. + // If start_time is in the future, the epoch will not begin until the start + // time. + google.protobuf.Timestamp start_time = 2 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"start_time\"" + ]; + // duration is the time in between epoch ticks. + // In order for intended behavior to be met, duration should + // be greater than the chains expected block time. + // Duration must be non-zero. + google.protobuf.Duration duration = 3 [ + (gogoproto.nullable) = false, + (gogoproto.stdduration) = true, + (gogoproto.jsontag) = "duration,omitempty", + (gogoproto.moretags) = "yaml:\"duration\"" + ]; + // current_epoch is the current epoch number, or in other words, + // how many times has the timer 'ticked'. + // The first tick (current_epoch=1) is defined as + // the first block whose blocktime is greater than the EpochInfo start_time. + int64 current_epoch = 4; + // current_epoch_start_time describes the start time of the current timer + // interval. The interval is (current_epoch_start_time, + // current_epoch_start_time + duration] When the timer ticks, this is set to + // current_epoch_start_time = last_epoch_start_time + duration only one timer + // tick for a given identifier can occur per block. + // + // NOTE! The current_epoch_start_time may diverge significantly from the + // wall-clock time the epoch began at. Wall-clock time of epoch start may be + // >> current_epoch_start_time. Suppose current_epoch_start_time = 10, + // duration = 5. Suppose the chain goes offline at t=14, and comes back online + // at t=30, and produces blocks at every successive time. (t=31, 32, etc.) 
+ // * The t=30 block will start the epoch for (10, 15] + // * The t=31 block will start the epoch for (15, 20] + // * The t=32 block will start the epoch for (20, 25] + // * The t=33 block will start the epoch for (25, 30] + // * The t=34 block will start the epoch for (30, 35] + // * The **t=36** block will start the epoch for (35, 40] + google.protobuf.Timestamp current_epoch_start_time = 5 [ + (gogoproto.stdtime) = true, + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"current_epoch_start_time\"" + ]; + // epoch_counting_started is a boolean that indicates whether this + // epoch timer has begun yet. + bool epoch_counting_started = 6; + reserved 7; + // current_epoch_start_height is the block height at which the current epoch + // started. (The block height at which the timer last ticked) + int64 current_epoch_start_height = 8; +} \ No newline at end of file diff --git a/proto/centauri/ratelimit/v1beta1/genesis.proto b/proto/centauri/ratelimit/v1beta1/genesis.proto new file mode 100644 index 000000000..091dcb855 --- /dev/null +++ b/proto/centauri/ratelimit/v1beta1/genesis.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; +package centauri.ratelimit.v1beta1; + +import "gogoproto/gogo.proto"; +import "centauri/ratelimit/v1beta1/params.proto"; +import "centauri/ratelimit/v1beta1/ratelimit.proto"; +import "centauri/ratelimit/v1beta1/epoch.proto"; + +option go_package = "x/ratelimit/types"; + +// GenesisState defines the ratelimit module's genesis state. +message GenesisState { + Params params = 1 [ + (gogoproto.moretags) = "yaml:\"params\"", + (gogoproto.nullable) = false + ]; + + repeated RateLimit rate_limits = 2 [ + (gogoproto.moretags) = "yaml:\"rate_limits\"", + (gogoproto.nullable) = false + ]; + + repeated WhitelistedAddressPair whitelisted_address_pairs = 3 [ + (gogoproto.moretags) = "yaml:\"whitelisted_address_pairs\"", + (gogoproto.nullable) = false + ]; + + repeated string pending_send_packet_sequence_numbers = 4; + + repeated EpochInfo epochs = 5 [ (gogoproto.nullable) = false ]; +} diff --git a/proto/centauri/ratelimit/v1beta1/params.proto b/proto/centauri/ratelimit/v1beta1/params.proto new file mode 100644 index 000000000..22186f8ac --- /dev/null +++ b/proto/centauri/ratelimit/v1beta1/params.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; +package centauri.ratelimit.v1beta1; + +option go_package = "x/ratelimit/types"; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; + +// Params holds parameters for the ratelimit module. +message Params {} diff --git a/proto/centauri/ratelimit/v1beta1/query.proto b/proto/centauri/ratelimit/v1beta1/query.proto new file mode 100644 index 000000000..7826edc2f --- /dev/null +++ b/proto/centauri/ratelimit/v1beta1/query.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; +package centauri.ratelimit.v1beta1; + +import "centauri/ratelimit/v1beta1/ratelimit.proto"; +import "google/api/annotations.proto"; +import "gogoproto/gogo.proto"; + +option go_package = "x/ratelimit/types"; + +// Query defines the gRPC querier service. 
+service Query { + rpc AllRateLimits(QueryAllRateLimitsRequest) + returns (QueryAllRateLimitsResponse) { + option (google.api.http).get = "/centauri/ratelimit/ratelimits"; + } + rpc RateLimit(QueryRateLimitRequest) returns (QueryRateLimitResponse) { + option (google.api.http).get = + "/centauri/ratelimit/ratelimit/{channel_id}/by_denom"; + } + rpc RateLimitsByChainId(QueryRateLimitsByChainIdRequest) + returns (QueryRateLimitsByChainIdResponse) { + option (google.api.http).get = "/centauri/ratelimit/ratelimits/{chain_id}"; + } + rpc RateLimitsByChannelId(QueryRateLimitsByChannelIdRequest) + returns (QueryRateLimitsByChannelIdResponse) { + option (google.api.http).get = + "/centauri/ratelimit/ratelimits/{channel_id}"; + } + rpc AllWhitelistedAddresses(QueryAllWhitelistedAddressesRequest) + returns (QueryAllWhitelistedAddressesResponse) { + option (google.api.http).get = "/centauri/ratelimit/whitelisted_addresses"; + } +} + +message QueryAllRateLimitsRequest {} +message QueryAllRateLimitsResponse { + repeated RateLimit rate_limits = 1 [ (gogoproto.nullable) = false ]; +} + +message QueryRateLimitRequest { + string denom = 1; + string channel_id = 2; +} +message QueryRateLimitResponse { RateLimit rate_limit = 1; } + +message QueryRateLimitsByChainIdRequest { string chain_id = 1; } +message QueryRateLimitsByChainIdResponse { + repeated RateLimit rate_limits = 1 [ (gogoproto.nullable) = false ]; +} + +message QueryRateLimitsByChannelIdRequest { string channel_id = 1; } +message QueryRateLimitsByChannelIdResponse { + repeated RateLimit rate_limits = 1 [ (gogoproto.nullable) = false ]; +} + +message QueryAllWhitelistedAddressesRequest {} +message QueryAllWhitelistedAddressesResponse { + repeated WhitelistedAddressPair address_pairs = 1 + [ (gogoproto.nullable) = false ]; +} \ No newline at end of file diff --git a/proto/centauri/ratelimit/v1beta1/ratelimit.proto b/proto/centauri/ratelimit/v1beta1/ratelimit.proto new file mode 100644 index 000000000..26a2bb0f3 --- /dev/null +++ b/proto/centauri/ratelimit/v1beta1/ratelimit.proto @@ -0,0 +1,56 @@ +syntax = "proto3"; +package centauri.ratelimit.v1beta1; + +import "gogoproto/gogo.proto"; + +option go_package = "x/ratelimit/types"; + +enum PacketDirection { + option (gogoproto.goproto_enum_prefix) = false; + + PACKET_SEND = 0; + PACKET_RECV = 1; +} + +message Path { + string denom = 1; + string channel_id = 2; +} + +message Quota { + string max_percent_send = 1 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string max_percent_recv = 2 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + uint64 duration_hours = 3; +} + +message Flow { + string inflow = 1 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string outflow = 2 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string channel_value = 3 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; +} + +message RateLimit { + Path path = 1; + Quota quota = 2; + Flow flow = 3; +} + +message WhitelistedAddressPair { + string sender = 1; + string receiver = 2; +} diff --git a/proto/centauri/ratelimit/v1beta1/tx.proto b/proto/centauri/ratelimit/v1beta1/tx.proto new file mode 100755 index 000000000..4012cb04d --- /dev/null +++ b/proto/centauri/ratelimit/v1beta1/tx.proto @@ -0,0 +1,81 @@ +syntax = "proto3"; +package 
centauri.ratelimit.v1beta1; + +import "gogoproto/gogo.proto"; +import "cosmos/msg/v1/msg.proto"; + +option go_package = "x/ratelimit/types"; + +// Msg defines the transfer middleware Msg service. +service Msg { + rpc AddTransferRateLimit(MsgAddRateLimit) returns (MsgAddRateLimitResponse); + rpc UpdateTransferRateLimit(MsgUpdateRateLimit) returns (MsgUpdateRateLimitResponse); + rpc RemoveTransferRateLimit(MsgRemoveRateLimit) returns (MsgRemoveRateLimitResponse); + rpc ResetTransferRateLimit(MsgResetRateLimit) returns (MsgResetRateLimitResponse); +} + +message MsgAddRateLimit { + option (cosmos.msg.v1.signer) = "authority"; + + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; + string denom = 2; + string channel_id = 3; + string max_percent_send = 4 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string max_percent_recv = 5 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + uint64 duration_hours = 6; +} + +message MsgAddRateLimitResponse {} + +message MsgUpdateRateLimit { + option (cosmos.msg.v1.signer) = "authority"; + + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; + string denom = 2; + string channel_id = 3; + string max_percent_send = 4 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + string max_percent_recv = 5 [ + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int", + (gogoproto.nullable) = false + ]; + uint64 duration_hours = 6; +} + +message MsgUpdateRateLimitResponse {} + +message MsgRemoveRateLimit { + option (cosmos.msg.v1.signer) = "authority"; + + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; + string denom = 2; + string channel_id = 3; +} + +message MsgRemoveRateLimitResponse {} + +message MsgResetRateLimit { + option (cosmos.msg.v1.signer) = "authority"; + + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; + string denom = 2; + string channel_id = 3; +} + +message MsgResetRateLimitResponse {} diff --git a/proto/centauri/transfermiddleware/v1beta1/genesis.proto b/proto/centauri/transfermiddleware/v1beta1/genesis.proto index b6b978d0a..ebd341cac 100644 --- a/proto/centauri/transfermiddleware/v1beta1/genesis.proto +++ b/proto/centauri/transfermiddleware/v1beta1/genesis.proto @@ -14,3 +14,4 @@ message GenesisState { [ (gogoproto.nullable) = false ]; Params params = 2 [ (gogoproto.nullable) = false ]; } + diff --git a/proto/centauri/transfermiddleware/v1beta1/tx.proto b/proto/centauri/transfermiddleware/v1beta1/tx.proto index d8ce98314..a11e679c9 100644 --- a/proto/centauri/transfermiddleware/v1beta1/tx.proto +++ b/proto/centauri/transfermiddleware/v1beta1/tx.proto @@ -22,8 +22,6 @@ message MsgAddParachainIBCTokenInfo { // authority is the address that controls the module (defaults to x/gov unless // overwritten). 
string authority = 1 [ (gogoproto.moretags) = "yaml:\"authority\"" ]; - ; - string channel_id = 2 [ (gogoproto.moretags) = "yaml:\"channel_id\"" ]; string ibc_denom = 3 [ (gogoproto.moretags) = "yaml:\"ibc_denom\"" ]; string native_denom = 4 [ (gogoproto.moretags) = "yaml:\"native_denom\"" ]; diff --git a/x/ratelimit/client/cli/query.go b/x/ratelimit/client/cli/query.go new file mode 100644 index 000000000..69acddd48 --- /dev/null +++ b/x/ratelimit/client/cli/query.go @@ -0,0 +1,26 @@ +package cli + +import ( + "fmt" + + "github.com/spf13/cobra" + + "github.com/cosmos/cosmos-sdk/client" + + "github.com/notional-labs/centauri/v3/x/ratelimit/types" +) + +// GetQueryCmd returns the cli query commands for this module. +func GetQueryCmd() *cobra.Command { + // Group ratelimit queries under a subcommand + cmd := &cobra.Command{ + Use: types.ModuleName, + Short: fmt.Sprintf("Querying commands for the %s module", types.ModuleName), + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + RunE: client.ValidateCmd, + } + + cmd.AddCommand() + return cmd +} diff --git a/x/ratelimit/client/cli/tx.go b/x/ratelimit/client/cli/tx.go new file mode 100644 index 000000000..f47ed51ad --- /dev/null +++ b/x/ratelimit/client/cli/tx.go @@ -0,0 +1,22 @@ +package cli + +import ( + "fmt" + + "github.com/notional-labs/centauri/v3/x/ratelimit/types" + "github.com/spf13/cobra" +) + +// GetTxCmd returns the tx commands for the ratelimit module +func GetTxCmd() *cobra.Command { + txCmd := &cobra.Command{ + Use: types.ModuleName, + DisableFlagParsing: true, + SuggestionsMinimumDistance: 2, + Short: fmt.Sprintf("Tx commands for the %s module", types.ModuleName), + } + + txCmd.AddCommand() + + return txCmd +} diff --git a/x/ratelimit/ibc_middleware.go b/x/ratelimit/ibc_middleware.go new file mode 100644 index 000000000..e38b7f458 --- /dev/null +++ b/x/ratelimit/ibc_middleware.go @@ -0,0 +1,184 @@ +package ratelimit + +import ( + "fmt" + + "github.com/notional-labs/centauri/v3/x/ratelimit/keeper" + + sdk "github.com/cosmos/cosmos-sdk/types" + + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types" + "github.com/cosmos/ibc-go/v7/modules/core/exported" +) + +var _ porttypes.Middleware = &IBCMiddleware{} + +type IBCMiddleware struct { + app porttypes.IBCModule + keeper keeper.Keeper +} + +func NewIBCMiddleware(k keeper.Keeper, app porttypes.IBCModule) IBCMiddleware { + return IBCMiddleware{ + app: app, + keeper: k, + } +} + +// OnChanOpenInit implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenInit(ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID string, + channelID string, + channelCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + version string, +) (string, error) { + return im.app.OnChanOpenInit( + ctx, + order, + connectionHops, + portID, + channelID, + channelCap, + counterparty, + version, + ) +} + +// OnChanOpenTry implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenTry( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID, + channelID string, + channelCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + counterpartyVersion string, +) (string, error) { + return im.app.OnChanOpenTry(ctx, order, connectionHops, portID, channelID, channelCap, 
counterparty, counterpartyVersion) +} + +// OnChanOpenAck implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenAck( + ctx sdk.Context, + portID, + channelID string, + counterpartyChannelID string, + counterpartyVersion string, +) error { + return im.app.OnChanOpenAck(ctx, portID, channelID, counterpartyChannelID, counterpartyVersion) +} + +// OnChanOpenConfirm implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanOpenConfirm( + ctx sdk.Context, + portID, + channelID string, +) error { + return im.app.OnChanOpenConfirm(ctx, portID, channelID) +} + +// OnChanCloseInit implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanCloseInit( + ctx sdk.Context, + portID, + channelID string, +) error { + return im.app.OnChanCloseInit(ctx, portID, channelID) +} + +// OnChanCloseConfirm implements the IBCMiddleware interface +func (im IBCMiddleware) OnChanCloseConfirm( + ctx sdk.Context, + portID, + channelID string, +) error { + return im.app.OnChanCloseConfirm(ctx, portID, channelID) +} + +// OnRecvPacket implements the IBCMiddleware interface +func (im IBCMiddleware) OnRecvPacket( + ctx sdk.Context, + packet channeltypes.Packet, + relayer sdk.AccAddress, +) exported.Acknowledgement { + // Check if the packet would cause the rate limit to be exceeded, + // and if so, return an ack error + if err := im.keeper.ReceiveRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error(fmt.Sprintf("ICS20 packet receive was denied: %s", err.Error())) + return channeltypes.NewErrorAcknowledgement(err) + } + + // If the packet was not rate-limited, pass it down to the Transfer OnRecvPacket callback + return im.app.OnRecvPacket(ctx, packet, relayer) +} + +// OnAcknowledgementPacket implements the IBCMiddleware interface +func (im IBCMiddleware) OnAcknowledgementPacket( + ctx sdk.Context, + packet channeltypes.Packet, + acknowledgement []byte, + relayer sdk.AccAddress, +) error { + if err := im.keeper.AcknowledgeRateLimitedPacket(ctx, packet, acknowledgement); err != nil { + im.keeper.Logger(ctx).Error(fmt.Sprintf("ICS20 RateLimited OnAckPacket failed: %s", err.Error())) + return err + } + return im.app.OnAcknowledgementPacket(ctx, packet, acknowledgement, relayer) +} + +// OnTimeoutPacket implements the IBCMiddleware interface +func (im IBCMiddleware) OnTimeoutPacket( + ctx sdk.Context, + packet channeltypes.Packet, + relayer sdk.AccAddress, +) error { + if err := im.keeper.TimeoutRateLimitedPacket(ctx, packet); err != nil { + im.keeper.Logger(ctx).Error(fmt.Sprintf("ICS20 RateLimited OnTimeoutPacket failed: %s", err.Error())) + return err + } + return im.app.OnTimeoutPacket(ctx, packet, relayer) +} + +// SendPacket implements the ICS4 Wrapper interface +// Rate-limited SendPacket found in RateLimit Keeper +func (im IBCMiddleware) SendPacket( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + sourcePort string, + sourceChannel string, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, + data []byte, +) (sequence uint64, err error) { + return im.keeper.SendPacket( + ctx, + chanCap, + sourcePort, + sourceChannel, + timeoutHeight, + timeoutTimestamp, + data, + ) +} + +// WriteAcknowledgement implements the ICS4 Wrapper interface +func (im IBCMiddleware) WriteAcknowledgement( + ctx sdk.Context, + chanCap *capabilitytypes.Capability, + packet exported.PacketI, + ack exported.Acknowledgement, +) error { + return im.keeper.WriteAcknowledgement(ctx, chanCap, packet, ack) +} + +// GetAppVersion returns the application version of the 
underlying application +func (i IBCMiddleware) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + return i.keeper.GetAppVersion(ctx, portID, channelID) +} diff --git a/x/ratelimit/keeper/abci.go b/x/ratelimit/keeper/abci.go new file mode 100644 index 000000000..608f80278 --- /dev/null +++ b/x/ratelimit/keeper/abci.go @@ -0,0 +1,57 @@ +package keeper + +import ( + "fmt" + "time" + + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/notional-labs/centauri/v3/x/ratelimit/types" +) + +// BeginBlocker of epochs module. +func (k Keeper) BeginBlocker(ctx sdk.Context) { + defer telemetry.ModuleMeasureSince(types.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker) + k.IterateEpochInfo(ctx, func(index int64, epochInfo types.EpochInfo) (stop bool) { + logger := k.Logger(ctx) + + // If blocktime < initial epoch start time, return + if ctx.BlockTime().Before(epochInfo.StartTime) { + return + } + // if epoch counting hasn't started, signal we need to start. + shouldInitialEpochStart := !epochInfo.EpochCountingStarted + + epochEndTime := epochInfo.CurrentEpochStartTime.Add(epochInfo.Duration) + shouldEpochStart := (ctx.BlockTime().After(epochEndTime)) || shouldInitialEpochStart + + if !shouldEpochStart { + return false + } + epochInfo.CurrentEpochStartHeight = ctx.BlockHeight() + + if shouldInitialEpochStart { + epochInfo.EpochCountingStarted = true + epochInfo.CurrentEpoch = 1 + epochInfo.CurrentEpochStartTime = epochInfo.StartTime + logger.Info(fmt.Sprintf("Starting new epoch with identifier %s epoch number %d", epochInfo.Identifier, epochInfo.CurrentEpoch)) + } else { + k.AfterEpochEnd(ctx, epochInfo) + epochInfo.CurrentEpoch += 1 + epochInfo.CurrentEpochStartTime = epochInfo.CurrentEpochStartTime.Add(epochInfo.Duration) + logger.Info(fmt.Sprintf("Starting epoch with identifier %s epoch number %d", epochInfo.Identifier, epochInfo.CurrentEpoch)) + } + + // emit new epoch start event, set epoch info, and run BeforeEpochStart hook + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeEpochStart, + sdk.NewAttribute(types.AttributeEpochNumber, fmt.Sprintf("%d", epochInfo.CurrentEpoch)), + sdk.NewAttribute(types.AttributeEpochStartTime, fmt.Sprintf("%d", epochInfo.CurrentEpochStartTime.Unix())), + ), + ) + k.setEpochInfo(ctx, epochInfo) + + return false + }) +} diff --git a/x/ratelimit/keeper/epoch.go b/x/ratelimit/keeper/epoch.go new file mode 100644 index 000000000..57a781d78 --- /dev/null +++ b/x/ratelimit/keeper/epoch.go @@ -0,0 +1,132 @@ +package keeper + +import ( + "fmt" + "time" + + "github.com/gogo/protobuf/proto" + + "github.com/notional-labs/centauri/v3/x/ratelimit/types" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// HasEpochInfo return true if has epoch info +func (k Keeper) HasEpochInfo(ctx sdk.Context, identifier string) bool { + store := ctx.KVStore(k.storeKey) + return store.Has(append(types.EpochKeyPrefix, []byte(identifier)...)) +} + +// GetEpochInfo returns epoch info by identifier. +func (k Keeper) GetEpochInfo(ctx sdk.Context, identifier string) types.EpochInfo { + epoch := types.EpochInfo{} + store := ctx.KVStore(k.storeKey) + b := store.Get(append(types.EpochKeyPrefix, []byte(identifier)...)) + if b == nil { + return epoch + } + err := proto.Unmarshal(b, &epoch) + if err != nil { + panic(err) + } + return epoch +} + +// AddEpochInfo adds a new epoch info. Will return an error if the epoch fails validation, +// or re-uses an existing identifier. 
+// This method also sets the start time if left unset, and sets the epoch start height. +func (k Keeper) AddEpochInfo(ctx sdk.Context, epoch types.EpochInfo) error { + err := epoch.Validate() + if err != nil { + return err + } + // Check if identifier already exists + if (k.GetEpochInfo(ctx, epoch.Identifier) != types.EpochInfo{}) { + return fmt.Errorf("epoch with identifier %s already exists", epoch.Identifier) + } + + // Initialize empty and default epoch values + if epoch.StartTime.Equal(time.Time{}) { + epoch.StartTime = ctx.BlockTime() + } + epoch.CurrentEpochStartHeight = ctx.BlockHeight() + k.setEpochInfo(ctx, epoch) + return nil +} + +// setEpochInfo set epoch info. +func (k Keeper) setEpochInfo(ctx sdk.Context, epoch types.EpochInfo) { + store := ctx.KVStore(k.storeKey) + value, err := proto.Marshal(&epoch) + if err != nil { + panic(err) + } + store.Set(append(types.EpochKeyPrefix, []byte(epoch.Identifier)...), value) +} + +// DeleteEpochInfo delete epoch info. +func (k Keeper) DeleteEpochInfo(ctx sdk.Context, identifier string) { + store := ctx.KVStore(k.storeKey) + store.Delete(append(types.EpochKeyPrefix, []byte(identifier)...)) +} + +// IterateEpochInfo iterate through epochs. +func (k Keeper) IterateEpochInfo(ctx sdk.Context, fn func(index int64, epochInfo types.EpochInfo) (stop bool)) { + store := ctx.KVStore(k.storeKey) + + iterator := sdk.KVStorePrefixIterator(store, types.EpochKeyPrefix) + defer iterator.Close() + + i := int64(0) + + for ; iterator.Valid(); iterator.Next() { + epoch := types.EpochInfo{} + err := proto.Unmarshal(iterator.Value(), &epoch) + if err != nil { + panic(err) + } + stop := fn(i, epoch) + + if stop { + break + } + i++ + } +} + +// AllEpochInfos iterate through epochs to return all epochs info. +func (k Keeper) AllEpochInfos(ctx sdk.Context) []types.EpochInfo { + epochs := []types.EpochInfo{} + k.IterateEpochInfo(ctx, func(index int64, epochInfo types.EpochInfo) (stop bool) { + epochs = append(epochs, epochInfo) + return false + }) + return epochs +} + +// NumBlocksSinceEpochStart returns the number of blocks since the epoch started. +// if the epoch started on block N, then calling this during block N (after BeforeEpochStart) +// would return 0. +// Calling it any point in block N+1 (assuming the epoch doesn't increment) would return 1. 
+func (k Keeper) NumBlocksSinceEpochStart(ctx sdk.Context, identifier string) (int64, error) { + epoch := k.GetEpochInfo(ctx, identifier) + if (epoch == types.EpochInfo{}) { + return 0, fmt.Errorf("epoch with identifier %s not found", identifier) + } + return ctx.BlockHeight() - epoch.CurrentEpochStartHeight, nil +} + +func (k Keeper) AfterEpochEnd(ctx sdk.Context, epochInfo types.EpochInfo) { + if epochInfo.Identifier == types.DAY_EPOCH { + epochHour := uint64(epochInfo.CurrentEpoch) + + for _, rateLimit := range k.GetAllRateLimits(ctx) { + if epochHour%rateLimit.Quota.DurationHours == 0 { + err := k.ResetRateLimit(ctx, rateLimit.Path.Denom, rateLimit.Path.ChannelId) + if err != nil { + k.Logger(ctx).Error(fmt.Sprintf("Unable to reset quota for Denom: %s, ChannelId: %s", rateLimit.Path.Denom, rateLimit.Path.ChannelId)) + } + } + } + } +} diff --git a/x/ratelimit/keeper/genesis.go b/x/ratelimit/keeper/genesis.go new file mode 100644 index 000000000..1d6722445 --- /dev/null +++ b/x/ratelimit/keeper/genesis.go @@ -0,0 +1,48 @@ +package keeper + +import ( + "strconv" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/notional-labs/centauri/v3/x/ratelimit/types" +) + +func (k Keeper) InitGenesis(ctx sdk.Context, genState types.GenesisState) { + k.SetParams(ctx, genState.Params) + for _, rateLimit := range genState.RateLimits { + k.SetRateLimit(ctx, rateLimit) + } + for _, addressPair := range genState.WhitelistedAddressPairs { + k.SetWhitelistedAddressPair(ctx, addressPair) + } + for _, pendingPacketId := range genState.PendingSendPacketSequenceNumbers { + splits := strings.Split(pendingPacketId, "/") + if len(splits) != 2 { + panic("Invalid pending send packet, must be of form: {channelId}/{sequenceNumber}") + } + channelId := splits[0] + sequence, err := strconv.ParseUint(splits[1], 10, 64) + if err != nil { + panic(err) + } + k.SetPendingSendPacket(ctx, channelId, sequence) + } + for _, epoch := range genState.Epochs { + err := k.AddEpochInfo(ctx, epoch) + if err != nil { + panic(err) + } + } +} + +func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { + genesis := types.DefaultGenesisState() + + genesis.Params = k.GetParams(ctx) + genesis.RateLimits = k.GetAllRateLimits(ctx) + genesis.WhitelistedAddressPairs = k.GetAllWhitelistedAddressPairs(ctx) + genesis.PendingSendPacketSequenceNumbers = k.GetAllPendingSendPackets(ctx) + + return genesis +} diff --git a/x/ratelimit/keeper/grpc_query.go b/x/ratelimit/keeper/grpc_query.go new file mode 100644 index 000000000..77e7c6ae2 --- /dev/null +++ b/x/ratelimit/keeper/grpc_query.go @@ -0,0 +1,78 @@ +package keeper + +import ( + "context" + + errorsmod "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + ibctmtypes "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint" + "github.com/notional-labs/centauri/v3/x/ratelimit/types" +) + +var _ types.QueryServer = Keeper{} + +// Query all rate limits +func (k Keeper) AllRateLimits(goCtx context.Context, req *types.QueryAllRateLimitsRequest) (*types.QueryAllRateLimitsResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + rateLimits := k.GetAllRateLimits(ctx) + return &types.QueryAllRateLimitsResponse{RateLimits: rateLimits}, nil +} + +// Query a rate limit by denom and channelId +func (k Keeper) RateLimit(goCtx context.Context, req *types.QueryRateLimitRequest) (*types.QueryRateLimitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + rateLimit, found := 
k.GetRateLimit(ctx, req.Denom, req.ChannelId) + if !found { + return &types.QueryRateLimitResponse{}, nil + } + return &types.QueryRateLimitResponse{RateLimit: &rateLimit}, nil +} + +// Query all rate limits for a given chain +func (k Keeper) RateLimitsByChainId(goCtx context.Context, req *types.QueryRateLimitsByChainIdRequest) (*types.QueryRateLimitsByChainIdResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + rateLimits := []types.RateLimit{} + for _, rateLimit := range k.GetAllRateLimits(ctx) { + + // Determine the client state from the channel Id + _, clientState, err := k.channelKeeper.GetChannelClientState(ctx, transfertypes.PortID, rateLimit.Path.ChannelId) + if err != nil { + return &types.QueryRateLimitsByChainIdResponse{}, errorsmod.Wrapf(types.ErrInvalidClientState, "Unable to fetch client state from channelId") + } + client, ok := clientState.(*ibctmtypes.ClientState) + if !ok { + return &types.QueryRateLimitsByChainIdResponse{}, errorsmod.Wrapf(types.ErrInvalidClientState, "Client state is not tendermint") + } + + // If the chain ID matches, add the rate limit to the returned list + if client.ChainId == req.ChainId { + rateLimits = append(rateLimits, rateLimit) + } + } + + return &types.QueryRateLimitsByChainIdResponse{RateLimits: rateLimits}, nil +} + +// Query all rate limits for a given channel +func (k Keeper) RateLimitsByChannelId(goCtx context.Context, req *types.QueryRateLimitsByChannelIdRequest) (*types.QueryRateLimitsByChannelIdResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + rateLimits := []types.RateLimit{} + for _, rateLimit := range k.GetAllRateLimits(ctx) { + // If the channel ID matches, add the rate limit to the returned list + if rateLimit.Path.ChannelId == req.ChannelId { + rateLimits = append(rateLimits, rateLimit) + } + } + + return &types.QueryRateLimitsByChannelIdResponse{RateLimits: rateLimits}, nil +} + +// Query all whitelisted addresses +func (k Keeper) AllWhitelistedAddresses(goCtx context.Context, req *types.QueryAllWhitelistedAddressesRequest) (*types.QueryAllWhitelistedAddressesResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + whitelistedAddresses := k.GetAllWhitelistedAddressPairs(ctx) + return &types.QueryAllWhitelistedAddressesResponse{AddressPairs: whitelistedAddresses}, nil +} diff --git a/x/ratelimit/keeper/keeper.go b/x/ratelimit/keeper/keeper.go new file mode 100644 index 000000000..5265b6a34 --- /dev/null +++ b/x/ratelimit/keeper/keeper.go @@ -0,0 +1,65 @@ +package keeper + +import ( + "fmt" + + "github.com/cometbft/cometbft/libs/log" + "github.com/cosmos/cosmos-sdk/codec" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" + porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types" + "github.com/notional-labs/centauri/v3/x/ratelimit/types" + tfmwkeeper "github.com/notional-labs/centauri/v3/x/transfermiddleware/keeper" +) + +type Keeper struct { + storeKey storetypes.StoreKey + cdc codec.BinaryCodec + paramstore paramtypes.Subspace + + bankKeeper types.BankKeeper + channelKeeper types.ChannelKeeper + ics4Wrapper porttypes.ICS4Wrapper + tfmwKeeper tfmwkeeper.Keeper + + // the address capable of executing rate limit messages such as MsgAddRateLimit and MsgRemoveRateLimit. Typically, this + // should be the x/gov module account. 
+ authority string +} + +func NewKeeper( + cdc codec.BinaryCodec, + key storetypes.StoreKey, + ps paramtypes.Subspace, + bankKeeper types.BankKeeper, + channelKeeper types.ChannelKeeper, + ics4Wrapper porttypes.ICS4Wrapper, + tfmwKeeper tfmwkeeper.Keeper, + authority string, +) *Keeper { + return &Keeper{ + cdc: cdc, + storeKey: key, + paramstore: ps, + bankKeeper: bankKeeper, + channelKeeper: channelKeeper, + ics4Wrapper: ics4Wrapper, + tfmwKeeper: tfmwKeeper, + authority: authority, + } +} + +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +// GetParams get all parameters as types.Params +func (k Keeper) GetParams(ctx sdk.Context) types.Params { + return types.NewParams() +} + +// SetParams set the params +func (k Keeper) SetParams(ctx sdk.Context, params types.Params) { + k.paramstore.SetParamSet(ctx, ¶ms) +} diff --git a/x/ratelimit/keeper/msg_server.go b/x/ratelimit/keeper/msg_server.go new file mode 100644 index 000000000..b53e6b986 --- /dev/null +++ b/x/ratelimit/keeper/msg_server.go @@ -0,0 +1,83 @@ +package keeper + +import ( + "context" + + "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + "github.com/notional-labs/centauri/v3/x/ratelimit/types" +) + +var _ types.MsgServer = msgServer{} + +// NewMsgServerImpl returns an implementation of the MsgServer interface +// for the provided Keeper. +func NewMsgServerImpl(keeper Keeper) types.MsgServer { + return &msgServer{ + Keeper: keeper, + } +} + +type msgServer struct { + Keeper +} + +func (k Keeper) AddTransferRateLimit(goCtx context.Context, msg *types.MsgAddRateLimit) (*types.MsgAddRateLimitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if k.authority != msg.Authority { + return nil, errors.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Authority) + } + + err := k.AddRateLimit(ctx, msg) + if err != nil { + return nil, err + } + + return &types.MsgAddRateLimitResponse{}, nil +} + +func (k Keeper) UpdateTransferRateLimit(goCtx context.Context, msg *types.MsgUpdateRateLimit) (*types.MsgUpdateRateLimitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if k.authority != msg.Authority { + return nil, errors.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Authority) + } + + err := k.UpdateRateLimit(ctx, msg) + if err != nil { + return nil, err + } + + return &types.MsgUpdateRateLimitResponse{}, nil +} + +func (k Keeper) RemoveTransferRateLimit(goCtx context.Context, msg *types.MsgRemoveRateLimit) (*types.MsgRemoveRateLimitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if k.authority != msg.Authority { + return nil, errors.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Authority) + } + + err := k.RemoveRateLimit(ctx, msg.Denom, msg.ChannelId) + if err != nil { + return nil, err + } + + return &types.MsgRemoveRateLimitResponse{}, nil +} + +func (k Keeper) ResetTransferRateLimit(goCtx context.Context, msg *types.MsgResetRateLimit) (*types.MsgResetRateLimitResponse, error) { + ctx := sdk.UnwrapSDKContext(goCtx) + + if k.authority != msg.Authority { + return nil, errors.Wrapf(govtypes.ErrInvalidSigner, "invalid authority; expected %s, got %s", k.authority, msg.Authority) + } + + err := k.ResetRateLimit(ctx, msg.Denom, msg.ChannelId) + if err != nil { + return nil, err + } + return &types.MsgResetRateLimitResponse{}, nil +} 
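For context, the Msg service above only accepts the configured authority as signer (the x/gov module account by default), so new rate limits are expected to arrive through governance. Below is a minimal sketch of such a message, assuming the generated Go types follow standard gogoproto naming for tx.proto above; the denom, channel, and 30%/24h values are purely illustrative.

package main

import (
	"fmt"

	sdk "github.com/cosmos/cosmos-sdk/types"
	authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
	govtypes "github.com/cosmos/cosmos-sdk/x/gov/types"

	ratelimittypes "github.com/notional-labs/centauri/v3/x/ratelimit/types"
)

func main() {
	// Intended effect (illustrative values): limit ICS-20 flow for one denom on
	// channel-0 to 30% of its channel value in each direction per 24h window.
	msg := ratelimittypes.MsgAddRateLimit{
		Authority:      authtypes.NewModuleAddress(govtypes.ModuleName).String(), // x/gov module account
		Denom:          "ppica",     // illustrative denom
		ChannelId:      "channel-0", // illustrative channel
		MaxPercentSend: sdk.NewInt(30),
		MaxPercentRecv: sdk.NewInt(30),
		DurationHours:  24,
	}
	fmt.Println(msg.String())
}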
diff --git a/x/ratelimit/keeper/packet.go b/x/ratelimit/keeper/packet.go new file mode 100644 index 000000000..c9cd19fa3 --- /dev/null +++ b/x/ratelimit/keeper/packet.go @@ -0,0 +1,279 @@ +package keeper + +import ( + "encoding/json" + "fmt" + + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported" + + "github.com/notional-labs/centauri/v3/x/ratelimit/types" +) + +type RateLimitedPacketInfo struct { + ChannelID string + Denom string + Amount math.Int + Sender string + Receiver string +} + +// Parse the denom from the Send Packet that will be used by the rate limit module +// The denom that the rate limiter will use for a SEND packet depends on whether +// it was a NATIVE token (e.g. ustrd, stuatom, etc.) or NON-NATIVE token (e.g. ibc/...)... +// +// We can identify if the token is native or not by parsing the trace denom from the packet +// If the token is NATIVE, it will not have a prefix (e.g. ustrd), +// and if it is NON-NATIVE, it will have a prefix (e.g. transfer/channel-2/uosmo) +// +// For NATIVE denoms, return as is (e.g. ustrd) +// For NON-NATIVE denoms, take the ibc hash (e.g. hash "transfer/channel-2/usoms" into "ibc/...") +func (k Keeper) ParseDenomFromSendPacket(packet transfertypes.FungibleTokenPacketData) (denom string) { + // Determine the denom by looking at the denom trace path + denomTrace := transfertypes.ParseDenomTrace(packet.Denom) + + // Native assets will have an empty trace path and can be returned as is + if denomTrace.Path == "" { + denom = packet.Denom + } else { + // Non-native assets should be hashed + denom = denomTrace.IBCDenom() + } + + return denom +} + +// Parse the denom from the Recv Packet that will be used by the rate limit module +// The denom that the rate limiter will use for a RECEIVE packet depends on whether it was a source or sink +// +// Sink: The token moves forward, to a chain different than its previous hop +// The new port and channel are APPENDED to the denom trace. +// (e.g. A -> B, B is a sink) (e.g. A -> B -> C, C is a sink) +// +// Source: The token moves backwards (i.e. revisits the last chain it was sent from) +// The port and channel are REMOVED from the denom trace - undoing the last hop. +// (e.g. A -> B -> A, A is a source) (e.g. A -> B -> C -> B, B is a source) +// +// If the chain is acting as a SINK: We add on the Stride port and channel and hash it +// Ex1: uosmo sent from Osmosis to Stride +// Packet Denom: uosmo +// -> Add Prefix: transfer/channel-X/uosmo +// -> Hash: ibc/... +// +// Ex2: ujuno sent from Osmosis to Stride +// PacketDenom: transfer/channel-Y/ujuno (channel-Y is the Juno <> Osmosis channel) +// -> Add Prefix: transfer/channel-X/transfer/channel-Y/ujuno +// -> Hash: ibc/... +// +// If the chain is acting as a SOURCE: First, remove the prefix. 
Then if there is still a denom trace, hash it +// Ex1: ustrd sent back to Stride from Osmosis +// Packet Denom: transfer/channel-X/ustrd +// -> Remove Prefix: ustrd +// -> Leave as is: ustrd +// +// Ex2: juno was sent to Stride, then to Osmosis, then back to Stride +// Packet Denom: transfer/channel-X/transfer/channel-Z/ujuno +// -> Remove Prefix: transfer/channel-Z/ujuno +// -> Hash: ibc/... +func (k Keeper) ParseDenomFromRecvPacket(packet channeltypes.Packet, packetData transfertypes.FungibleTokenPacketData) (denom string) { + // To determine the denom, first check whether Stride is acting as source + if transfertypes.ReceiverChainIsSource(packet.GetSourcePort(), packet.GetSourceChannel(), packetData.Denom) { + // Remove the source prefix (e.g. transfer/channel-X/transfer/channel-Z/ujuno -> transfer/channel-Z/ujuno) + sourcePrefix := transfertypes.GetDenomPrefix(packet.GetSourcePort(), packet.GetSourceChannel()) + unprefixedDenom := packetData.Denom[len(sourcePrefix):] + + // Native assets will have an empty trace path and can be returned as is + denomTrace := transfertypes.ParseDenomTrace(unprefixedDenom) + if denomTrace.Path == "" { + denom = unprefixedDenom + } else { + // Non-native assets should be hashed + denom = denomTrace.IBCDenom() + } + } else { + // Prefix the destination channel - this will contain the trailing slash (e.g. transfer/channel-X/) + destinationPrefix := transfertypes.GetDenomPrefix(packet.GetDestPort(), packet.GetDestChannel()) + prefixedDenom := destinationPrefix + packetData.Denom + + // Hash the denom trace + denomTrace := transfertypes.ParseDenomTrace(prefixedDenom) + denom = denomTrace.IBCDenom() + } + + return denom +} + +// Parses the channelId and denom for the corresponding RateLimit object, as well as +// the sender/receiver/transfer amount +// +// The Stride channelID should always be used as the key for the RateLimit object (not the counterparty channelID) +// For a SEND packet, the Stride channelID is the SOURCE channel +// For a RECEIVE packet, the Stride channelID is the DESTINATION channel +// +// The Source and Destination are defined from the perspective of a packet recipient +// Meaning, when a send packet lands on the host chain, the "Source" will be the Stride Channel, +// and the "Destination" will be the Host Channel +// And, when a receive packet lands on Stride, the "Source" will be the host zone's channel, +// and the "Destination" will be the Stride Channel +func (k Keeper) ParsePacketInfo(packet channeltypes.Packet, direction types.PacketDirection) (RateLimitedPacketInfo, error) { + var packetData transfertypes.FungibleTokenPacketData + if err := json.Unmarshal(packet.GetData(), &packetData); err != nil { + return RateLimitedPacketInfo{}, err + } + + var channelID, denom string + if direction == types.PACKET_SEND { + channelID = packet.GetSourceChannel() + denom = k.ParseDenomFromSendPacket(packetData) + } else { + channelID = packet.GetDestChannel() + denom = k.ParseDenomFromRecvPacket(packet, packetData) + } + + amount, ok := sdk.NewIntFromString(packetData.Amount) + if !ok { + return RateLimitedPacketInfo{}, + errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "Unable to cast packet amount '%s' to sdkmath.Int", packetData.Amount) + } + + packetInfo := RateLimitedPacketInfo{ + ChannelID: channelID, + Denom: denom, + Amount: amount, + Sender: packetData.Sender, + Receiver: packetData.Receiver, + } + + return packetInfo, nil +} + +// Middleware implementation for SendPacket with rate limiting +// Checks whether the rate limit has 
been exceeded - and if it hasn't, sends the packet +func (k Keeper) SendRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet) error { + packetInfo, err := k.ParsePacketInfo(packet, types.PACKET_SEND) + if err != nil { + return err + } + // Check if the packet would exceed the outflow rate limit + updatedFlow, err := k.CheckRateLimitAndUpdateFlow(ctx, types.PACKET_SEND, packetInfo) + if err != nil { + return err + } + // Store the sequence number of the packet so that if the transfer fails, + // we can identify if it was sent during this quota and can revert the outflow + if updatedFlow { + k.SetPendingSendPacket(ctx, packetInfo.ChannelID, packet.Sequence) + } + + return nil +} + +// Middleware implementation for RecvPacket with rate limiting +// Checks whether the rate limit has been exceeded - and if it hasn't, allows the packet +func (k Keeper) ReceiveRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet) error { + packetInfo, err := k.ParsePacketInfo(packet, types.PACKET_RECV) + if err != nil { + return err + } + + _, err = k.CheckRateLimitAndUpdateFlow(ctx, types.PACKET_RECV, packetInfo) + return err +} + +// Middleware implementation for OnAckPacket with rate limiting +// If the packet failed, we should decrement the Outflow +func (k Keeper) AcknowledgeRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet, ack []byte) error { + // Parse the denom, channelId, and amount from the packet + packetInfo, err := k.ParsePacketInfo(packet, types.PACKET_SEND) + if err != nil { + return err + } + + var acknowledgement channeltypes.Acknowledgement + if err := transfertypes.ModuleCdc.UnmarshalJSON(ack, &acknowledgement); err != nil { + return errorsmod.Wrapf(sdkerrors.ErrUnknownRequest, "cannot unmarshal ICS-20 transfer packet acknowledgement: %s", err.Error()) + } + + // The ack can come back as either AcknowledgementResult or AcknowledgementError + // If it comes back as AcknowledgementResult, the messages are encoded differently depending on the SDK version + switch response := acknowledgement.Response.(type) { + case *channeltypes.Acknowledgement_Result: + if len(response.Result) == 0 { + return errorsmod.Wrapf(channeltypes.ErrInvalidAcknowledgement, "acknowledgement result cannot be empty") + } + // If the ack was successful, remove the pending packet + k.RemovePendingSendPacket(ctx, packetInfo.ChannelID, packet.Sequence) + return nil + default: + // If the ack failed, undo the change to the rate limit Outflow + return k.UndoSendPacket(ctx, packetInfo.ChannelID, packet.Sequence, packetInfo.Denom, packetInfo.Amount) + } +} + +// Middleware implementation for OnTimeoutPacket with rate limiting +// The Outflow should be decremented for the timed-out packet +func (k Keeper) TimeoutRateLimitedPacket(ctx sdk.Context, packet channeltypes.Packet) error { + packetInfo, err := k.ParsePacketInfo(packet, types.PACKET_SEND) + if err != nil { + return err + } + + return k.UndoSendPacket(ctx, packetInfo.ChannelID, packet.Sequence, packetInfo.Denom, packetInfo.Amount) +} + +// SendPacket wraps IBC ChannelKeeper's SendPacket function +// If the packet does not get rate limited, it passes the packet to the IBC Channel keeper +func (k Keeper) SendPacket( + ctx sdk.Context, + channelCap *capabilitytypes.Capability, + sourcePort string, + sourceChannel string, + timeoutHeight clienttypes.Height, + timeoutTimestamp uint64, + data []byte, +) (sequence uint64, err error) { + // The packet must first be sent up the stack to get the sequence number from the channel keeper + sequence, err = 
k.ics4Wrapper.SendPacket( + ctx, + channelCap, + sourcePort, + sourceChannel, + timeoutHeight, + timeoutTimestamp, + data, + ) + if err != nil { + return sequence, err + } + err = k.SendRateLimitedPacket(ctx, channeltypes.Packet{ + Sequence: sequence, + SourceChannel: sourceChannel, + SourcePort: sourcePort, + TimeoutHeight: timeoutHeight, + TimeoutTimestamp: timeoutTimestamp, + Data: data, + }) + if err != nil { + k.Logger(ctx).Error(fmt.Sprintf("ICS20 packet send was denied: %s", err.Error())) + return 0, err + } + + return sequence, err +} + +// WriteAcknowledgement wraps IBC ChannelKeeper's WriteAcknowledgement function +func (k Keeper) WriteAcknowledgement(ctx sdk.Context, chanCap *capabilitytypes.Capability, packet ibcexported.PacketI, acknowledgement ibcexported.Acknowledgement) error { + return k.ics4Wrapper.WriteAcknowledgement(ctx, chanCap, packet, acknowledgement) +} + +// GetAppVersion wraps IBC ChannelKeeper's GetAppVersion function +func (k Keeper) GetAppVersion(ctx sdk.Context, portID, channelID string) (string, bool) { + return k.ics4Wrapper.GetAppVersion(ctx, portID, channelID) +} diff --git a/x/ratelimit/keeper/rate_limit.go b/x/ratelimit/keeper/rate_limit.go new file mode 100644 index 000000000..41e9187b3 --- /dev/null +++ b/x/ratelimit/keeper/rate_limit.go @@ -0,0 +1,392 @@ +package keeper + +import ( + "encoding/binary" + "fmt" + "strings" + + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/store/prefix" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/notional-labs/centauri/v3/x/ratelimit/types" +) + +// Get the rate limit byte key built from the denom and channelId +func GetRateLimitItemKey(denom string, channelId string) []byte { + return append(types.KeyPrefix(denom), types.KeyPrefix(channelId)...) 
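+ // Example for clarity (hypothetical values, and assuming types.KeyPrefix simply
+ // converts a string to its raw bytes): denom "ppica" with channelId "channel-0"
+ // produces the key bytes for "ppicachannel-0", so a given denom/channel pair always
+ // maps to the same entry under the rate limit store prefix.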
+} + +// The total value on a given path (aka, the denominator in the percentage calculation) +// is the total supply of the given denom +func (k Keeper) GetChannelValue(ctx sdk.Context, denom string) math.Int { + return k.bankKeeper.GetSupply(ctx, denom).Amount +} + +// If the rate limit is exceeded or the denom is blacklisted, we emit an event +func EmitTransferDeniedEvent(ctx sdk.Context, reason, denom, channelId string, direction types.PacketDirection, amount math.Int, err error) { + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTransferDenied, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(types.AttributeKeyReason, reason), + sdk.NewAttribute(types.AttributeKeyAction, strings.ToLower(direction.String())), // packet_send or packet_recv + sdk.NewAttribute(types.AttributeKeyDenom, denom), + sdk.NewAttribute(types.AttributeKeyChannel, channelId), + sdk.NewAttribute(types.AttributeKeyAmount, amount.String()), + sdk.NewAttribute(types.AttributeKeyError, err.Error()), + ), + ) +} + +// Adds an amount to the flow in either the SEND or RECV direction +func (k Keeper) UpdateFlow(rateLimit types.RateLimit, direction types.PacketDirection, amount math.Int) error { + switch direction { + case types.PACKET_SEND: + return rateLimit.Flow.AddOutflow(amount, *rateLimit.Quota) + case types.PACKET_RECV: + return rateLimit.Flow.AddInflow(amount, *rateLimit.Quota) + default: + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "invalid packet direction (%s)", direction.String()) + } +} + +// Checks whether the given packet will exceed the rate limit +// Called by OnRecvPacket and OnSendPacket +func (k Keeper) CheckRateLimitAndUpdateFlow( + ctx sdk.Context, + direction types.PacketDirection, + packetInfo RateLimitedPacketInfo, +) (updatedFlow bool, err error) { + denom := packetInfo.Denom + channelId := packetInfo.ChannelID + amount := packetInfo.Amount + + // If there's no rate limit yet for this denom, no action is necessary + rateLimit, found := k.GetRateLimit(ctx, denom, channelId) + if !found { + return false, nil + } + + // Check if the sender/receiver pair is whitelisted + // If so, return a success without modifying the quota + if k.IsAddressPairWhitelisted(ctx, packetInfo.Sender, packetInfo.Receiver) { + return false, nil + } + // Update the flow object with the change in amount + if err := k.UpdateFlow(rateLimit, direction, amount); err != nil { + // If the rate limit was exceeded, emit an event + EmitTransferDeniedEvent(ctx, types.EventRateLimitExceeded, denom, channelId, direction, amount, err) + return false, err + } + // If there's no quota error, update the rate limit object in the store with the new flow + k.SetRateLimit(ctx, rateLimit) + + return true, nil +} + +// If a SendPacket fails or times out, undo the outflow increment that happened during the send +func (k Keeper) UndoSendPacket(ctx sdk.Context, channelId string, sequence uint64, denom string, amount math.Int) error { + rateLimit, found := k.GetRateLimit(ctx, denom, channelId) + if !found { + return nil + } + + // If the packet was sent during this quota, decrement the outflow + // Otherwise, it can be ignored + if k.CheckPacketSentDuringCurrentQuota(ctx, channelId, sequence) { + rateLimit.Flow.Outflow = rateLimit.Flow.Outflow.Sub(amount) + k.SetRateLimit(ctx, rateLimit) + + k.RemovePendingSendPacket(ctx, channelId, sequence) + } + + return nil +} + +// Reset the rate limit after expiration +// The inflow and outflow should get reset to 0, the channelValue should be updated, +// and 
all pending send packet sequence numbers should be removed +func (k Keeper) ResetRateLimit(ctx sdk.Context, denom string, channelId string) error { + if k.tfmwKeeper.HasParachainIBCTokenInfoByNativeDenom(ctx, denom) { + tokenInfo := k.tfmwKeeper.GetParachainIBCTokenInfoByNativeDenom(ctx, denom) + if channelId == tokenInfo.ChannelId { + denom = tokenInfo.IbcDenom + } + } + + rateLimit, found := k.GetRateLimit(ctx, denom, channelId) + if !found { + return types.ErrRateLimitNotFound + } + + flow := types.Flow{ + Inflow: math.ZeroInt(), + Outflow: math.ZeroInt(), + ChannelValue: k.GetChannelValue(ctx, denom), + } + rateLimit.Flow = &flow + + k.SetRateLimit(ctx, rateLimit) + k.RemoveAllChannelPendingSendPackets(ctx, channelId) + return nil +} + +// Stores/Updates a rate limit object in the store +func (k Keeper) SetRateLimit(ctx sdk.Context, rateLimit types.RateLimit) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RateLimitKeyPrefix) + + rateLimitKey := GetRateLimitItemKey(rateLimit.Path.Denom, rateLimit.Path.ChannelId) + rateLimitValue := k.cdc.MustMarshal(&rateLimit) + + store.Set(rateLimitKey, rateLimitValue) +} + +// Removes a rate limit object from the store using denom and channel-id +func (k Keeper) RemoveRateLimit(ctx sdk.Context, denom string, channelId string) error { + if k.tfmwKeeper.HasParachainIBCTokenInfoByNativeDenom(ctx, denom) { + tokenInfo := k.tfmwKeeper.GetParachainIBCTokenInfoByNativeDenom(ctx, denom) + if channelId == tokenInfo.ChannelId { + denom = tokenInfo.IbcDenom + } + } + + _, found := k.GetRateLimit(ctx, denom, channelId) + if !found { + return types.ErrRateLimitNotFound + } + + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RateLimitKeyPrefix) + rateLimitKey := GetRateLimitItemKey(denom, channelId) + store.Delete(rateLimitKey) + + return nil +} + +// Grabs and returns a rate limit object from the store using denom and channel-id +func (k Keeper) GetRateLimit(ctx sdk.Context, denom string, channelId string) (rateLimit types.RateLimit, found bool) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RateLimitKeyPrefix) + + rateLimitKey := GetRateLimitItemKey(denom, channelId) + rateLimitValue := store.Get(rateLimitKey) + + if len(rateLimitValue) == 0 { + return rateLimit, false + } + + k.cdc.MustUnmarshal(rateLimitValue, &rateLimit) + return rateLimit, true +} + +// AddRateLimit +func (k Keeper) AddRateLimit(ctx sdk.Context, msg *types.MsgAddRateLimit) error { + // Check if this is denom - channel transfer from Picasso + denom := msg.Denom + if k.tfmwKeeper.HasParachainIBCTokenInfoByNativeDenom(ctx, denom) { + tokenInfo := k.tfmwKeeper.GetParachainIBCTokenInfoByNativeDenom(ctx, denom) + if msg.ChannelId == tokenInfo.ChannelId { + denom = tokenInfo.IbcDenom + } + } + // Confirm the channel value is not zero + channelValue := k.GetChannelValue(ctx, denom) + if channelValue.IsZero() { + return errorsmod.Wrap(types.ErrZeroChannelValue, "zero channel value") + } + + // Confirm the rate limit does not already exist + _, found := k.GetRateLimit(ctx, denom, msg.ChannelId) + if found { + return errorsmod.Wrap(types.ErrRateLimitAlreadyExists, "rate limit already exists") + } + + // Create and store the rate limit object + path := types.Path{ + Denom: denom, + ChannelId: msg.ChannelId, + } + quota := types.Quota{ + MaxPercentSend: msg.MaxPercentSend, + MaxPercentRecv: msg.MaxPercentRecv, + DurationHours: msg.DurationHours, + } + flow := types.Flow{ + Inflow: math.ZeroInt(), + Outflow: math.ZeroInt(), + ChannelValue: channelValue, + } + + 
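+ // Worked example (hypothetical numbers): with MaxPercentSend = 5, MaxPercentRecv = 5
+ // and a channel value (total supply) of 1_000_000_000, the flow stored below starts at
+ // zero, and any packet that would push the net outflow or net inflow on this channel
+ // past 50_000_000 is rejected until the quota's DurationHours window resets the flow.
+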
k.SetRateLimit(ctx, types.RateLimit{ + Path: &path, + Quota: &quota, + Flow: &flow, + }) + + return nil +} + +// UpdateRateLimit +func (k Keeper) UpdateRateLimit(ctx sdk.Context, msg *types.MsgUpdateRateLimit) error { + // Check if this is denom - channel transfer from Picasso + denom := msg.Denom + if k.tfmwKeeper.HasParachainIBCTokenInfoByNativeDenom(ctx, denom) { + tokenInfo := k.tfmwKeeper.GetParachainIBCTokenInfoByNativeDenom(ctx, denom) + if msg.ChannelId == tokenInfo.ChannelId { + denom = tokenInfo.IbcDenom + } + } + + // Confirm the rate limit exists + _, found := k.GetRateLimit(ctx, denom, msg.ChannelId) + if !found { + return errorsmod.Wrap(types.ErrRateLimitNotFound, "rate limit not found") + } + + // Update the rate limit object with the new quota information + // The flow should also get reset to 0 + path := types.Path{ + Denom: denom, + ChannelId: msg.ChannelId, + } + quota := types.Quota{ + MaxPercentSend: msg.MaxPercentSend, + MaxPercentRecv: msg.MaxPercentRecv, + DurationHours: msg.DurationHours, + } + flow := types.Flow{ + Inflow: math.ZeroInt(), + Outflow: math.ZeroInt(), + ChannelValue: k.GetChannelValue(ctx, denom), + } + + k.SetRateLimit(ctx, types.RateLimit{ + Path: &path, + Quota: &quota, + Flow: &flow, + }) + + return nil +} + +// Returns all rate limits stored +func (k Keeper) GetAllRateLimits(ctx sdk.Context) []types.RateLimit { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.RateLimitKeyPrefix) + + iterator := store.Iterator(nil, nil) + defer iterator.Close() + + allRateLimits := []types.RateLimit{} + for ; iterator.Valid(); iterator.Next() { + + rateLimit := types.RateLimit{} + k.cdc.MustUnmarshal(iterator.Value(), &rateLimit) + allRateLimits = append(allRateLimits, rateLimit) + } + + return allRateLimits +} + +// Sets the sequence number of a packet that was just sent +func (k Keeper) SetPendingSendPacket(ctx sdk.Context, channelId string, sequence uint64) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PendingSendPacketPrefix) + key := types.GetPendingSendPacketKey(channelId, sequence) + store.Set(key, []byte{1}) +} + +// Remove a pending packet sequence number from the store +// Used after the ack or timeout for a packet has been received +func (k Keeper) RemovePendingSendPacket(ctx sdk.Context, channelId string, sequence uint64) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PendingSendPacketPrefix) + key := types.GetPendingSendPacketKey(channelId, sequence) + store.Delete(key) +} + +// Checks whether the packet sequence number is in the store - indicating that it was +// sent during the current quota +func (k Keeper) CheckPacketSentDuringCurrentQuota(ctx sdk.Context, channelId string, sequence uint64) bool { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PendingSendPacketPrefix) + key := types.GetPendingSendPacketKey(channelId, sequence) + valueBz := store.Get(key) + found := len(valueBz) != 0 + return found +} + +// Get all pending packet sequence numbers +func (k Keeper) GetAllPendingSendPackets(ctx sdk.Context) []string { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PendingSendPacketPrefix) + + iterator := store.Iterator(nil, nil) + defer iterator.Close() + + pendingPackets := []string{} + for ; iterator.Valid(); iterator.Next() { + key := iterator.Key() + + channelId := string(key[:types.PendingSendPacketChannelLength]) + channelId = strings.TrimRight(channelId, "\x00") // removes null bytes from suffix + sequence := binary.BigEndian.Uint64(key[types.PendingSendPacketChannelLength:]) + + packetId := 
fmt.Sprintf("%s/%d", channelId, sequence) + pendingPackets = append(pendingPackets, packetId) + } + + return pendingPackets +} + +// Remove all pending sequence numbers from the store +// This is executed when the quota resets +func (k Keeper) RemoveAllChannelPendingSendPackets(ctx sdk.Context, channelId string) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.PendingSendPacketPrefix) + + iterator := sdk.KVStorePrefixIterator(store, types.KeyPrefix(channelId)) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + store.Delete(iterator.Key()) + } +} + +// Adds an pair of sender and receiver addresses to the whitelist to allow all +// IBC transfers between those addresses to skip all flow calculations +func (k Keeper) SetWhitelistedAddressPair(ctx sdk.Context, whitelist types.WhitelistedAddressPair) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.AddressWhitelistKeyPrefix) + key := types.GetAddressWhitelistKey(whitelist.Sender, whitelist.Receiver) + value := k.cdc.MustMarshal(&whitelist) + store.Set(key, value) +} + +// Removes a whitelisted address pair so that it's transfers are counted in the quota +func (k Keeper) RemoveWhitelistedAddressPair(ctx sdk.Context, sender, receiver string) { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.AddressWhitelistKeyPrefix) + key := types.GetAddressWhitelistKey(sender, receiver) + store.Delete(key) +} + +// Check if a sender/receiver address pair is currently whitelisted +func (k Keeper) IsAddressPairWhitelisted(ctx sdk.Context, sender, receiver string) bool { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.AddressWhitelistKeyPrefix) + + key := types.GetAddressWhitelistKey(sender, receiver) + value := store.Get(key) + found := len(value) != 0 + + return found +} + +// Get all the whitelisted addresses +func (k Keeper) GetAllWhitelistedAddressPairs(ctx sdk.Context) []types.WhitelistedAddressPair { + store := prefix.NewStore(ctx.KVStore(k.storeKey), types.AddressWhitelistKeyPrefix) + + iterator := store.Iterator(nil, nil) + defer iterator.Close() + + allWhitelistedAddresses := []types.WhitelistedAddressPair{} + for ; iterator.Valid(); iterator.Next() { + whitelist := types.WhitelistedAddressPair{} + k.cdc.MustUnmarshal(iterator.Value(), &whitelist) + allWhitelistedAddresses = append(allWhitelistedAddresses, whitelist) + } + + return allWhitelistedAddresses +} diff --git a/x/ratelimit/module.go b/x/ratelimit/module.go new file mode 100644 index 000000000..ab0dcb8f9 --- /dev/null +++ b/x/ratelimit/module.go @@ -0,0 +1,158 @@ +package ratelimit + +import ( + "context" + "encoding/json" + "fmt" + + abci "github.com/cometbft/cometbft/abci/types" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + simtypes "github.com/cosmos/cosmos-sdk/types/simulation" + "github.com/gorilla/mux" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/notional-labs/centauri/v3/x/ratelimit/client/cli" + "github.com/notional-labs/centauri/v3/x/ratelimit/keeper" + "github.com/notional-labs/centauri/v3/x/ratelimit/types" + "github.com/spf13/cobra" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} + _ module.AppModuleSimulation = AppModule{} +) + +// AppModuleBasic is the router AppModuleBasic +type AppModuleBasic struct{} + +// Name implements AppModuleBasic interface +func (AppModuleBasic) Name() 
string { + return types.ModuleName +} + +// RegisterLegacyAminoCodec implements AppModuleBasic interface +func (AppModuleBasic) RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + types.RegisterLegacyAminoCodec(cdc) +} + +// RegisterInterfaces registers module concrete types into protobuf Any. +func (AppModuleBasic) RegisterInterfaces(reg codectypes.InterfaceRegistry) { + types.RegisterInterfaces(reg) +} + +// DefaultGenesis returns default genesis state as raw bytes for the +// ratelimit module. +func (AppModuleBasic) DefaultGenesis(cdc codec.JSONCodec) json.RawMessage { + return cdc.MustMarshalJSON(types.DefaultGenesisState()) +} + +// RegisterRESTRoutes implements AppModuleBasic interface +func (AppModuleBasic) RegisterRESTRoutes(_ client.Context, _ *mux.Router) {} + +// RegisterGRPCGatewayRoutes registers the gRPC Gateway routes for the module. +func (AppModuleBasic) RegisterGRPCGatewayRoutes(clientCtx client.Context, mux *runtime.ServeMux) { + if err := types.RegisterQueryHandlerClient(context.Background(), mux, types.NewQueryClient(clientCtx)); err != nil { + panic(err) + } +} + +// GetTxCmd implements AppModuleBasic interface +func (AppModuleBasic) GetTxCmd() *cobra.Command { + return nil +} + +// GetQueryCmd implements AppModuleBasic interface +func (AppModuleBasic) GetQueryCmd() *cobra.Command { + return cli.GetQueryCmd() +} + +// AppModule represents the AppModule for this module +type AppModule struct { + AppModuleBasic + keeper *keeper.Keeper +} + +// NewAppModule creates a new ratelimit module +func NewAppModule(k *keeper.Keeper) AppModule { + return AppModule{ + AppModuleBasic: AppModuleBasic{}, + keeper: k, + } +} + +// Name returns the ratelimit module's name. +func (am AppModule) Name() string { + return am.AppModuleBasic.Name() +} + +// QuerierRoute implements the AppModule interface +func (AppModule) QuerierRoute() string { + return types.QuerierRoute +} + +// RegisterServices registers module services. +func (am AppModule) RegisterServices(cfg module.Configurator) { + types.RegisterQueryServer(cfg.QueryServer(), am.keeper) + types.RegisterMsgServer(cfg.MsgServer(), keeper.NewMsgServerImpl(*am.keeper)) +} + +// InitGenesis performs genesis initialization for the ratelimit module. It returns +// no validator updates. +func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate { + var genesisState types.GenesisState + cdc.MustUnmarshalJSON(data, &genesisState) + am.keeper.InitGenesis(ctx, genesisState) + return []abci.ValidatorUpdate{} +} + +// ValidateGenesis performs genesis state validation for the ratelimit module. +func (AppModuleBasic) ValidateGenesis(cdc codec.JSONCodec, _ client.TxEncodingConfig, bz json.RawMessage) error { + var data types.GenesisState + if err := cdc.UnmarshalJSON(bz, &data); err != nil { + return fmt.Errorf("failed to unmarshal %s genesis state: %w", types.ModuleName, err) + } + + return types.ValidateGenesis(data) +} + +// ExportGenesis returns the exported genesis state as raw bytes for the ratelimit +// module. +func (am AppModule) ExportGenesis(ctx sdk.Context, cdc codec.JSONCodec) json.RawMessage { + gs := am.keeper.ExportGenesis(ctx) + return cdc.MustMarshalJSON(gs) +} + +// ConsensusVersion implements AppModule/ConsensusVersion. +func (AppModule) ConsensusVersion() uint64 { return 1 } + +// BeginBlock implements the AppModule interface +func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { + am.keeper.BeginBlocker(ctx) +} + +// EndBlock implements the AppModule interface +func (am AppModule) EndBlock(_ sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { + return []abci.ValidatorUpdate{} +} + +// AppModuleSimulation functions + +// GenerateGenesisState creates a randomized GenState of the ratelimit module. +func (AppModule) GenerateGenesisState(_ *module.SimulationState) {} + +// ProposalContents doesn't return any content functions for governance proposals. +func (AppModule) ProposalContents(_ module.SimulationState) []simtypes.WeightedProposalContent { //nolint:staticcheck // WeightedProposalContent is necessary to satisfy the module interface + return nil +} + +// RegisterStoreDecoder registers a decoder for the ratelimit module's types +func (am AppModule) RegisterStoreDecoder(_ sdk.StoreDecoderRegistry) {} + +// WeightedOperations returns all the ratelimit module operations with their respective weights. +func (am AppModule) WeightedOperations(_ module.SimulationState) []simtypes.WeightedOperation { + return nil +} diff --git a/x/ratelimit/relay_test.go b/x/ratelimit/relay_test.go new file mode 100644 index 000000000..c55b4d702 --- /dev/null +++ b/x/ratelimit/relay_test.go @@ -0,0 +1,261 @@ +package ratelimit_test + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + customibctesting "github.com/notional-labs/centauri/v3/app/ibctesting" + ratelimittypes "github.com/notional-labs/centauri/v3/x/ratelimit/types" + "github.com/stretchr/testify/suite" +) + +type RateLimitTestSuite struct { + suite.Suite + + coordinator *customibctesting.Coordinator + + // testing chains used for convenience and readability + chainA *customibctesting.TestChain + chainB *customibctesting.TestChain + chainC *customibctesting.TestChain +} + +func (suite *RateLimitTestSuite) SetupTest() { + suite.coordinator = customibctesting.NewCoordinator(suite.T(), 4) + suite.chainA = suite.coordinator.GetChain(customibctesting.GetChainID(1)) + suite.chainB = suite.coordinator.GetChain(customibctesting.GetChainID(2)) + suite.chainC = suite.coordinator.GetChain(customibctesting.GetChainID(3)) +} + +func NewTransferPath(chainA, chainB *customibctesting.TestChain) *customibctesting.Path { + path := customibctesting.NewPath(chainA, chainB) + path.EndpointA.ChannelConfig.PortID = customibctesting.TransferPort + path.EndpointB.ChannelConfig.PortID = customibctesting.TransferPort + path.EndpointA.ChannelConfig.Version = transfertypes.Version + path.EndpointB.ChannelConfig.Version = transfertypes.Version + + return path +} + +func TestKeeperTestSuite(t *testing.T) { + suite.Run(t, new(RateLimitTestSuite)) +} + +func (suite *RateLimitTestSuite) TestReceiveIBCToken() { + var ( + transferAmount = sdk.NewInt(1000000000) + // when transfer via sdk transfer from A (module) -> B (contract) + ibcDenom = "ibc/C053D637CCA2A2BA030E2C5EE1B28A16F71CCB0E45E8BE52766DC1B241B77878" + nativeDenom = "ppica" + nativeTokenSendOnChainA = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + nativeTokenReceiveOnChainB = sdk.NewCoin(nativeDenom, transferAmount) + timeoutHeight = clienttypes.NewHeight(1, 110) + expChainABalanceDiff = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + ) + + 
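+ // Scenario sketch: transfer the full amount from chain A to chain B with no limit in
+ // place, then register a 5%/5% rate limit for the native denom on chain B's channel,
+ // relay a transfer worth 5% of the original amount (accepted), and finally relay the
+ // same transfer again, expecting chain B's balances to stay unchanged because the
+ // receive quota is exhausted.
+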
suite.SetupTest() // reset + + path := NewTransferPath(suite.chainA, suite.chainB) + suite.coordinator.Setup(path) + + // Add parachain token info + chainBtransMiddlewareKeeper := suite.chainB.TransferMiddleware() + err := chainBtransMiddlewareKeeper.AddParachainIBCInfo(suite.chainB.GetContext(), ibcDenom, path.EndpointB.ChannelID, nativeDenom, sdk.DefaultBondDenom) + suite.Require().NoError(err) + + originalChainABalance := suite.chainA.AllBalances(suite.chainA.SenderAccount.GetAddress()) + originalChainBBalance := suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + + msg := transfertypes.NewMsgTransfer(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nativeTokenSendOnChainA, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + _, err = suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, path.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPackets(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and source chain balance was decreased + newChainABalance := suite.chainA.AllBalances(suite.chainA.SenderAccount.GetAddress()) + suite.Require().Equal(originalChainABalance.Sub(expChainABalanceDiff), newChainABalance) + + // and dest chain balance contains voucher + expBalance := originalChainBBalance.Add(nativeTokenReceiveOnChainB) + gotBalance := suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + + suite.Require().Equal(expBalance, gotBalance) + + // add rate limit + chainBRateLimitKeeper := suite.chainB.RateLimit() + msgAddRateLimit := ratelimittypes.MsgAddRateLimit{ + Denom: nativeDenom, + ChannelId: path.EndpointB.ChannelID, + MaxPercentSend: sdk.NewInt(5), + MaxPercentRecv: sdk.NewInt(5), + DurationHours: 1, + } + err = chainBRateLimitKeeper.AddRateLimit(suite.chainB.GetContext(), &msgAddRateLimit) + suite.Require().NoError(err) + + // send from A to B + transferAmount = transferAmount.Mul(sdk.NewInt(5)).Quo(sdk.NewInt(100)) + nativeTokenSendOnChainA = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + msg = transfertypes.NewMsgTransfer(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nativeTokenSendOnChainA, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + _, err = suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, path.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPackets(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + expBalance = expBalance.Add(sdk.NewCoin(nativeDenom, transferAmount)) + gotBalance = suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + suite.Require().Equal(expBalance, gotBalance) + + // send 1 more time + _, err = suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) 
+ suite.Require().NoError(err, path.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPackets(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // not receive token because catch the threshold => balances have no change + gotBalance = suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + suite.Require().Equal(expBalance, gotBalance) +} + +func (suite *RateLimitTestSuite) TestSendIBCToken() { + var ( + transferAmount = sdk.NewInt(1000000000) + // when transfer via sdk transfer from A (module) -> B (contract) + ibcDenom = "ibc/C053D637CCA2A2BA030E2C5EE1B28A16F71CCB0E45E8BE52766DC1B241B77878" + nativeDenom = "ppica" + nativeTokenSendOnChainA = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + nativeTokenReceiveOnChainB = sdk.NewCoin(nativeDenom, transferAmount) + timeoutHeight = clienttypes.NewHeight(1, 110) + expChainABalanceDiff = sdk.NewCoin(sdk.DefaultBondDenom, transferAmount) + ) + + suite.SetupTest() // reset + + path := NewTransferPath(suite.chainA, suite.chainB) + suite.coordinator.Setup(path) + + // Add parachain token info + chainBtransMiddlewareKeeper := suite.chainB.TransferMiddleware() + err := chainBtransMiddlewareKeeper.AddParachainIBCInfo(suite.chainB.GetContext(), ibcDenom, path.EndpointB.ChannelID, nativeDenom, sdk.DefaultBondDenom) + suite.Require().NoError(err) + + originalChainABalance := suite.chainA.AllBalances(suite.chainA.SenderAccount.GetAddress()) + originalChainBBalance := suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + + msg := transfertypes.NewMsgTransfer(path.EndpointA.ChannelConfig.PortID, path.EndpointA.ChannelID, nativeTokenSendOnChainA, suite.chainA.SenderAccount.GetAddress().String(), suite.chainB.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + _, err = suite.chainA.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, path.EndpointB.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPackets(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + // and source chain balance was decreased + newChainABalance := suite.chainA.AllBalances(suite.chainA.SenderAccount.GetAddress()) + suite.Require().Equal(originalChainABalance.Sub(expChainABalanceDiff), newChainABalance) + + // and dest chain balance contains voucher + expBalance := originalChainBBalance.Add(nativeTokenReceiveOnChainB) + gotBalance := suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + + suite.Require().Equal(expBalance, gotBalance) + + originalChainBBalance = gotBalance + // add rate limit + chainBRateLimitKeeper := suite.chainB.RateLimit() + msgAddRateLimit := ratelimittypes.MsgAddRateLimit{ + Denom: nativeDenom, + ChannelId: path.EndpointB.ChannelID, + MaxPercentSend: sdk.NewInt(5), + MaxPercentRecv: sdk.NewInt(5), + DurationHours: 1, + } + err = chainBRateLimitKeeper.AddRateLimit(suite.chainB.GetContext(), &msgAddRateLimit) + 
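+ // Mirror of the receive test above, but in the send direction: once this rate limit
+ // is registered, a single transfer of 5% of the original amount can leave chain B,
+ // and the next identical send is expected to fail against the outflow quota.
+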
suite.Require().NoError(err) + + // send from B to A + transferAmount = transferAmount.Mul(sdk.NewInt(5)).Quo(sdk.NewInt(100)) + nativeTokenSendOnChainB := sdk.NewCoin(nativeDenom, transferAmount) + msg = transfertypes.NewMsgTransfer(path.EndpointB.ChannelConfig.PortID, path.EndpointB.ChannelID, nativeTokenSendOnChainB, suite.chainB.SenderAccount.GetAddress().String(), suite.chainA.SenderAccount.GetAddress().String(), timeoutHeight, 0, "") + _, err = suite.chainB.SendMsgs(msg) + suite.Require().NoError(err) + suite.Require().NoError(err, path.EndpointA.UpdateClient()) + + // then + suite.Require().Equal(1, len(suite.chainB.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + + // and when relay to chain B and handle Ack on chain A + err = suite.coordinator.RelayAndAckPendingPacketsReverse(path) + suite.Require().NoError(err) + + // then + suite.Require().Equal(0, len(suite.chainA.PendingSendPackets)) + suite.Require().Equal(0, len(suite.chainB.PendingSendPackets)) + + expBalance = originalChainBBalance.Sub(nativeTokenSendOnChainB) + gotBalance = suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + suite.Require().Equal(expBalance, gotBalance) + + // send 1 more time + _, err = suite.chainB.SendMsgsWithExpPass(false, msg) + suite.Require().Error(err) // catch the threshold so should not be sent + + // SignAndDeliver calls app.Commit() + suite.chainB.NextBlock() + + // increment sequence for successful transaction execution + err = suite.chainB.SenderAccount.SetSequence(suite.chainB.SenderAccount.GetSequence() + 1) + suite.Require().NoError(err) + + suite.chainB.Coordinator.IncrementTime() + + // not receive token because catch the threshold => balances have no change + balances := suite.chainB.AllBalances(suite.chainB.SenderAccount.GetAddress()) + suite.Require().Equal(expBalance, balances) +} diff --git a/x/ratelimit/types/codec.go b/x/ratelimit/types/codec.go new file mode 100644 index 000000000..9902cbe95 --- /dev/null +++ b/x/ratelimit/types/codec.go @@ -0,0 +1,51 @@ +package types + +import ( + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/legacy" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/types/msgservice" + authzcodec "github.com/cosmos/cosmos-sdk/x/authz/codec" + govcodec "github.com/cosmos/cosmos-sdk/x/gov/codec" + groupcodec "github.com/cosmos/cosmos-sdk/x/group/codec" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// RegisterLegacyAminoCodec registers the account interfaces and concrete types on the +// provided LegacyAmino codec. 
These types are used for Amino JSON serialization +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + legacy.RegisterAminoMsg(cdc, &MsgAddRateLimit{}, "centauri/MsgAddRateLimit") + legacy.RegisterAminoMsg(cdc, &MsgUpdateRateLimit{}, "centauri/MsgUpdateRateLimit") + legacy.RegisterAminoMsg(cdc, &MsgRemoveRateLimit{}, "centauri/MsgRemoveRateLimit") + legacy.RegisterAminoMsg(cdc, &MsgResetRateLimit{}, "centauri/MsgResetRateLimit") +} + +func RegisterInterfaces(registry codectypes.InterfaceRegistry) { + registry.RegisterImplementations( + (*sdk.Msg)(nil), + &MsgAddRateLimit{}, + &MsgUpdateRateLimit{}, + &MsgRemoveRateLimit{}, + &MsgResetRateLimit{}, + ) + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} + +var ( + amino = codec.NewLegacyAmino() + ModuleCdc = codec.NewAminoCodec(amino) +) + +func init() { + RegisterLegacyAminoCodec(amino) + cryptocodec.RegisterCrypto(amino) + sdk.RegisterLegacyAminoCodec(amino) + + // Register all Amino interfaces and concrete types on the authz and gov Amino codec so that this can later be + // used to properly serialize MsgGrant, MsgExec and MsgSubmitProposal instances + RegisterLegacyAminoCodec(authzcodec.Amino) + RegisterLegacyAminoCodec(govcodec.Amino) + RegisterLegacyAminoCodec(groupcodec.Amino) +} diff --git a/x/ratelimit/types/epoch.go b/x/ratelimit/types/epoch.go new file mode 100644 index 000000000..6ad9be68b --- /dev/null +++ b/x/ratelimit/types/epoch.go @@ -0,0 +1,40 @@ +package types + +import ( + "errors" + "time" +) + +var ( + DAY_EPOCH = "hour" + EpochHourPeriod time.Duration = time.Hour * 24 +) + +// Validate also validates epoch info. +func (epoch EpochInfo) Validate() error { + if epoch.Identifier == "" { + return errors.New("epoch identifier should NOT be empty") + } + if epoch.Duration == 0 { + return errors.New("epoch duration should NOT be 0") + } + if epoch.CurrentEpoch < 0 { + return errors.New("epoch CurrentEpoch must be non-negative") + } + if epoch.CurrentEpochStartHeight < 0 { + return errors.New("epoch CurrentEpoch must be non-negative") + } + return nil +} + +func NewGenesisEpochInfo(identifier string, duration time.Duration) EpochInfo { + return EpochInfo{ + Identifier: identifier, + StartTime: time.Time{}, + Duration: duration, + CurrentEpoch: 0, + CurrentEpochStartHeight: 0, + CurrentEpochStartTime: time.Time{}, + EpochCountingStarted: false, + } +} diff --git a/x/ratelimit/types/epoch.pb.go b/x/ratelimit/types/epoch.pb.go new file mode 100644 index 000000000..78b7d7016 --- /dev/null +++ b/x/ratelimit/types/epoch.pb.go @@ -0,0 +1,636 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/epoch.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + _ "google.golang.org/protobuf/types/known/durationpb" + _ "google.golang.org/protobuf/types/known/timestamppb" + io "io" + math "math" + math_bits "math/bits" + time "time" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type EpochInfo struct { + // identifier is a unique reference to this particular timer. + Identifier string `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"` + // start_time is the time at which the timer first ever ticks. + // If start_time is in the future, the epoch will not begin until the start + // time. + StartTime time.Time `protobuf:"bytes,2,opt,name=start_time,json=startTime,proto3,stdtime" json:"start_time" yaml:"start_time"` + // duration is the time in between epoch ticks. + // In order for intended behavior to be met, duration should + // be greater than the chains expected block time. + // Duration must be non-zero. + Duration time.Duration `protobuf:"bytes,3,opt,name=duration,proto3,stdduration" json:"duration,omitempty" yaml:"duration"` + // current_epoch is the current epoch number, or in other words, + // how many times has the timer 'ticked'. + // The first tick (current_epoch=1) is defined as + // the first block whose blocktime is greater than the EpochInfo start_time. + CurrentEpoch int64 `protobuf:"varint,4,opt,name=current_epoch,json=currentEpoch,proto3" json:"current_epoch,omitempty"` + // current_epoch_start_time describes the start time of the current timer + // interval. The interval is (current_epoch_start_time, + // current_epoch_start_time + duration] When the timer ticks, this is set to + // current_epoch_start_time = last_epoch_start_time + duration only one timer + // tick for a given identifier can occur per block. + // + // NOTE! The current_epoch_start_time may diverge significantly from the + // wall-clock time the epoch began at. Wall-clock time of epoch start may be + // >> current_epoch_start_time. Suppose current_epoch_start_time = 10, + // duration = 5. Suppose the chain goes offline at t=14, and comes back online + // at t=30, and produces blocks at every successive time. (t=31, 32, etc.) + // * The t=30 block will start the epoch for (10, 15] + // * The t=31 block will start the epoch for (15, 20] + // * The t=32 block will start the epoch for (20, 25] + // * The t=33 block will start the epoch for (25, 30] + // * The t=34 block will start the epoch for (30, 35] + // * The **t=36** block will start the epoch for (35, 40] + CurrentEpochStartTime time.Time `protobuf:"bytes,5,opt,name=current_epoch_start_time,json=currentEpochStartTime,proto3,stdtime" json:"current_epoch_start_time" yaml:"current_epoch_start_time"` + // epoch_counting_started is a boolean, that indicates whether this + // epoch timer has began yet. + EpochCountingStarted bool `protobuf:"varint,6,opt,name=epoch_counting_started,json=epochCountingStarted,proto3" json:"epoch_counting_started,omitempty"` + // current_epoch_start_height is the block height at which the current epoch + // started. 
(The block height at which the timer last ticked) + CurrentEpochStartHeight int64 `protobuf:"varint,8,opt,name=current_epoch_start_height,json=currentEpochStartHeight,proto3" json:"current_epoch_start_height,omitempty"` +} + +func (m *EpochInfo) Reset() { *m = EpochInfo{} } +func (m *EpochInfo) String() string { return proto.CompactTextString(m) } +func (*EpochInfo) ProtoMessage() {} +func (*EpochInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2d9490a2e433bc10, []int{0} +} +func (m *EpochInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EpochInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_EpochInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *EpochInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_EpochInfo.Merge(m, src) +} +func (m *EpochInfo) XXX_Size() int { + return m.Size() +} +func (m *EpochInfo) XXX_DiscardUnknown() { + xxx_messageInfo_EpochInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_EpochInfo proto.InternalMessageInfo + +func (m *EpochInfo) GetIdentifier() string { + if m != nil { + return m.Identifier + } + return "" +} + +func (m *EpochInfo) GetStartTime() time.Time { + if m != nil { + return m.StartTime + } + return time.Time{} +} + +func (m *EpochInfo) GetDuration() time.Duration { + if m != nil { + return m.Duration + } + return 0 +} + +func (m *EpochInfo) GetCurrentEpoch() int64 { + if m != nil { + return m.CurrentEpoch + } + return 0 +} + +func (m *EpochInfo) GetCurrentEpochStartTime() time.Time { + if m != nil { + return m.CurrentEpochStartTime + } + return time.Time{} +} + +func (m *EpochInfo) GetEpochCountingStarted() bool { + if m != nil { + return m.EpochCountingStarted + } + return false +} + +func (m *EpochInfo) GetCurrentEpochStartHeight() int64 { + if m != nil { + return m.CurrentEpochStartHeight + } + return 0 +} + +func init() { + proto.RegisterType((*EpochInfo)(nil), "centauri.ratelimit.v1beta1.EpochInfo") +} + +func init() { + proto.RegisterFile("centauri/ratelimit/v1beta1/epoch.proto", fileDescriptor_2d9490a2e433bc10) +} + +var fileDescriptor_2d9490a2e433bc10 = []byte{ + // 425 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x41, 0x8f, 0x93, 0x40, + 0x14, 0xc7, 0x19, 0xb7, 0xae, 0x74, 0xd4, 0xe8, 0x4e, 0x56, 0x45, 0x12, 0x07, 0x82, 0x89, 0x21, + 0x59, 0x03, 0xa9, 0x7a, 0xd2, 0x5b, 0xd5, 0x44, 0x3d, 0xb2, 0x1e, 0x8c, 0x97, 0x86, 0xd2, 0x29, + 0x4c, 0x52, 0x18, 0x32, 0x7d, 0x18, 0x7b, 0xf3, 0x23, 0xf4, 0xe8, 0x47, 0xda, 0xe3, 0x1e, 0x3d, + 0xa1, 0x69, 0x6f, 0x1e, 0xf7, 0x13, 0x18, 0x66, 0xa0, 0xa2, 0xd5, 0xec, 0x0d, 0xde, 0xff, 0xf7, + 0xfe, 0xff, 0xf7, 0x5e, 0x06, 0x3f, 0x4a, 0x58, 0x01, 0x71, 0x25, 0x79, 0x28, 0x63, 0x60, 0x0b, + 0x9e, 0x73, 0x08, 0x3f, 0x8d, 0xa6, 0x0c, 0xe2, 0x51, 0xc8, 0x4a, 0x91, 0x64, 0x41, 0x29, 0x05, + 0x08, 0x62, 0x77, 0x5c, 0xb0, 0xe3, 0x82, 0x96, 0xb3, 0x8f, 0x53, 0x91, 0x0a, 0x85, 0x85, 0xcd, + 0x97, 0xee, 0xb0, 0x69, 0x2a, 0x44, 0xba, 0x60, 0xa1, 0xfa, 0x9b, 0x56, 0xf3, 0x70, 0x56, 0xc9, + 0x18, 0xb8, 0x28, 0x5a, 0xdd, 0xf9, 0x5b, 0x07, 0x9e, 0xb3, 0x25, 0xc4, 0x79, 0xa9, 0x01, 0x6f, + 0x3d, 0xc0, 0xc3, 0xd7, 0xcd, 0x08, 0x6f, 0x8b, 0xb9, 0x20, 0x14, 0x63, 0x3e, 0x63, 0x05, 0xf0, + 0x39, 0x67, 0xd2, 0x42, 0x2e, 0xf2, 0x87, 0x51, 0xaf, 0x42, 0x3e, 0x60, 0xbc, 0x84, 0x58, 0xc2, + 0xa4, 0xb1, 0xb1, 0xae, 0xb8, 0xc8, 0xbf, 0xfe, 0xc4, 
0x0e, 0x74, 0x46, 0xd0, 0x65, 0x04, 0xef, + 0xbb, 0x8c, 0xf1, 0x83, 0xb3, 0xda, 0x31, 0x2e, 0x6a, 0xe7, 0x68, 0x15, 0xe7, 0x8b, 0xe7, 0xde, + 0xef, 0x5e, 0x6f, 0xfd, 0xdd, 0x41, 0xd1, 0x50, 0x15, 0x1a, 0x9c, 0x64, 0xd8, 0xec, 0x46, 0xb7, + 0x0e, 0x94, 0xef, 0xfd, 0x3d, 0xdf, 0x57, 0x2d, 0x30, 0x1e, 0x35, 0xb6, 0x3f, 0x6b, 0x87, 0x74, + 0x2d, 0x8f, 0x45, 0xce, 0x81, 0xe5, 0x25, 0xac, 0x2e, 0x6a, 0xe7, 0x96, 0x0e, 0xeb, 0x34, 0xef, + 0x6b, 0x13, 0xb5, 0x73, 0x27, 0x0f, 0xf1, 0xcd, 0xa4, 0x92, 0x92, 0x15, 0x30, 0x51, 0xb7, 0xb7, + 0x06, 0x2e, 0xf2, 0x0f, 0xa2, 0x1b, 0x6d, 0x51, 0x1d, 0x83, 0x7c, 0x41, 0xd8, 0xfa, 0x83, 0x9a, + 0xf4, 0xf6, 0xbe, 0x7a, 0xe9, 0xde, 0x27, 0xed, 0xde, 0x8e, 0x1e, 0xe5, 0x7f, 0x4e, 0xfa, 0x0a, + 0x77, 0xfa, 0xc9, 0xa7, 0xbb, 0x8b, 0x3c, 0xc3, 0x77, 0x35, 0x9f, 0x88, 0xaa, 0x00, 0x5e, 0xa4, + 0xba, 0x91, 0xcd, 0xac, 0x43, 0x17, 0xf9, 0x66, 0x74, 0xac, 0xd4, 0x97, 0xad, 0x78, 0xaa, 0x35, + 0xf2, 0x02, 0xdb, 0xff, 0x4a, 0xcb, 0x18, 0x4f, 0x33, 0xb0, 0x4c, 0xb5, 0xea, 0xbd, 0xbd, 0xc0, + 0x37, 0x4a, 0x7e, 0x37, 0x30, 0xaf, 0xdd, 0x36, 0xc7, 0x27, 0x67, 0x1b, 0x8a, 0xce, 0x37, 0x14, + 0xfd, 0xd8, 0x50, 0xb4, 0xde, 0x52, 0xe3, 0x7c, 0x4b, 0x8d, 0x6f, 0x5b, 0x6a, 0x7c, 0x3c, 0xfa, + 0xdc, 0x7b, 0xc0, 0xb0, 0x2a, 0xd9, 0x72, 0x7a, 0xa8, 0xb6, 0x7f, 0xfa, 0x2b, 0x00, 0x00, 0xff, + 0xff, 0xe8, 0xa1, 0x47, 0xfe, 0xe3, 0x02, 0x00, 0x00, +} + +func (m *EpochInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EpochInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EpochInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CurrentEpochStartHeight != 0 { + i = encodeVarintEpoch(dAtA, i, uint64(m.CurrentEpochStartHeight)) + i-- + dAtA[i] = 0x40 + } + if m.EpochCountingStarted { + i-- + if m.EpochCountingStarted { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + n1, err1 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.CurrentEpochStartTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.CurrentEpochStartTime):]) + if err1 != nil { + return 0, err1 + } + i -= n1 + i = encodeVarintEpoch(dAtA, i, uint64(n1)) + i-- + dAtA[i] = 0x2a + if m.CurrentEpoch != 0 { + i = encodeVarintEpoch(dAtA, i, uint64(m.CurrentEpoch)) + i-- + dAtA[i] = 0x20 + } + n2, err2 := github_com_cosmos_gogoproto_types.StdDurationMarshalTo(m.Duration, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration):]) + if err2 != nil { + return 0, err2 + } + i -= n2 + i = encodeVarintEpoch(dAtA, i, uint64(n2)) + i-- + dAtA[i] = 0x1a + n3, err3 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.StartTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.StartTime):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintEpoch(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x12 + if len(m.Identifier) > 0 { + i -= len(m.Identifier) + copy(dAtA[i:], m.Identifier) + i = encodeVarintEpoch(dAtA, i, uint64(len(m.Identifier))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintEpoch(dAtA []byte, offset int, v uint64) int { + offset -= sovEpoch(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *EpochInfo) Size() (n int) { + if m == nil { + return 0 + } 
+ var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovEpoch(uint64(l)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.StartTime) + n += 1 + l + sovEpoch(uint64(l)) + l = github_com_cosmos_gogoproto_types.SizeOfStdDuration(m.Duration) + n += 1 + l + sovEpoch(uint64(l)) + if m.CurrentEpoch != 0 { + n += 1 + sovEpoch(uint64(m.CurrentEpoch)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.CurrentEpochStartTime) + n += 1 + l + sovEpoch(uint64(l)) + if m.EpochCountingStarted { + n += 2 + } + if m.CurrentEpochStartHeight != 0 { + n += 1 + sovEpoch(uint64(m.CurrentEpochStartHeight)) + } + return n +} + +func sovEpoch(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEpoch(x uint64) (n int) { + return sovEpoch(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *EpochInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EpochInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EpochInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthEpoch + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthEpoch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Identifier = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEpoch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEpoch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.StartTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEpoch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEpoch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdDurationUnmarshal(&m.Duration, 
dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpoch", wireType) + } + m.CurrentEpoch = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentEpoch |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpochStartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthEpoch + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthEpoch + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.CurrentEpochStartTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EpochCountingStarted", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.EpochCountingStarted = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentEpochStartHeight", wireType) + } + m.CurrentEpochStartHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEpoch + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentEpochStartHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEpoch(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEpoch + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEpoch(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEpoch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEpoch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEpoch + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEpoch + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEpoch + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType 
%d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEpoch + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEpoch = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEpoch = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEpoch = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/errors.go b/x/ratelimit/types/errors.go new file mode 100644 index 000000000..c362de065 --- /dev/null +++ b/x/ratelimit/types/errors.go @@ -0,0 +1,16 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" +) + +// x/ratelimit module sentinel errors +var ( + ErrRateLimitAlreadyExists = errorsmod.Register(ModuleName, 1, "ratelimit key duplicated") + ErrRateLimitNotFound = errorsmod.Register(ModuleName, 2, "rate limit not found") + ErrZeroChannelValue = errorsmod.Register(ModuleName, 3, "channel value is zero") + ErrQuotaExceeded = errorsmod.Register(ModuleName, 4, "quota exceeded") + ErrInvalidClientState = errorsmod.Register(ModuleName, 5, "unable to determine client state from channelId") + ErrChannelNotFound = errorsmod.Register(ModuleName, 6, "channel does not exist") + ErrDenomIsBlacklisted = errorsmod.Register(ModuleName, 7, "denom is blacklisted") +) diff --git a/x/ratelimit/types/events.go b/x/ratelimit/types/events.go new file mode 100644 index 000000000..a63cde3c2 --- /dev/null +++ b/x/ratelimit/types/events.go @@ -0,0 +1,20 @@ +package types + +var ( + EventTransferDenied = "transfer_denied" + + EventRateLimitExceeded = "rate_limit_exceeded" + + AttributeKeyReason = "reason" + AttributeKeyModule = "module" + AttributeKeyAction = "action" + AttributeKeyDenom = "denom" + AttributeKeyChannel = "channel" + AttributeKeyAmount = "amount" + AttributeKeyError = "error" + + EventTypeEpochEnd = "epoch_end" // TODO: need to clean up (not use) + EventTypeEpochStart = "epoch_start" + AttributeEpochNumber = "epoch_number" + AttributeEpochStartTime = "start_time" +) diff --git a/x/ratelimit/types/expected_keepers.go b/x/ratelimit/types/expected_keepers.go new file mode 100644 index 000000000..e1f5bd29a --- /dev/null +++ b/x/ratelimit/types/expected_keepers.go @@ -0,0 +1,20 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + "github.com/cosmos/ibc-go/v7/modules/core/exported" +) + +// BankKeeper defines the banking contract that must be fulfilled when +// creating a x/ratelimit keeper. +type BankKeeper interface { + GetSupply(ctx sdk.Context, denom string) sdk.Coin +} + +// ChannelKeeper defines the channel contract that must be fulfilled when +// creating a x/ratelimit keeper. 
+type ChannelKeeper interface { + GetChannel(ctx sdk.Context, portID string, channelID string) (channeltypes.Channel, bool) + GetChannelClientState(ctx sdk.Context, portID string, channelID string) (string, exported.ClientState, error) +} diff --git a/x/ratelimit/types/flow.go b/x/ratelimit/types/flow.go new file mode 100644 index 000000000..dce7192ab --- /dev/null +++ b/x/ratelimit/types/flow.go @@ -0,0 +1,47 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/math" +) + +// Initializes a new flow from the channel value +func NewFlow(channelValue math.Int) Flow { + flow := Flow{ + ChannelValue: channelValue, + Inflow: math.ZeroInt(), + Outflow: math.ZeroInt(), + } + + return flow +} + +// Adds an amount to the rate limit's flow after an incoming packet was received +// Returns an error if the new inflow will cause the rate limit to exceed its quota +func (f *Flow) AddInflow(amount math.Int, quota Quota) error { + netInflow := f.Inflow.Sub(f.Outflow).Add(amount) + + if quota.CheckExceedsQuota(PACKET_RECV, netInflow, f.ChannelValue) { + return errorsmod.Wrapf(ErrQuotaExceeded, + "Inflow exceeds quota - Net Inflow: %v, Channel Value: %v, Threshold: %v%%", + netInflow, f.ChannelValue, quota.MaxPercentRecv) + } + + f.Inflow = f.Inflow.Add(amount) + return nil +} + +// Adds an amount to the rate limit's flow after a packet was sent +// Returns an error if the new outflow will cause the rate limit to exceed its quota +func (f *Flow) AddOutflow(amount math.Int, quota Quota) error { + netOutflow := f.Outflow.Sub(f.Inflow).Add(amount) + + if quota.CheckExceedsQuota(PACKET_SEND, netOutflow, f.ChannelValue) { + return errorsmod.Wrapf(ErrQuotaExceeded, + "Outflow exceeds quota - Net Outflow: %v, Channel Value: %v, Threshold: %v%%", + netOutflow, f.ChannelValue, quota.MaxPercentSend) + } + + f.Outflow = f.Outflow.Add(amount) + return nil +} diff --git a/x/ratelimit/types/genesis.go b/x/ratelimit/types/genesis.go new file mode 100644 index 000000000..ca95a8d53 --- /dev/null +++ b/x/ratelimit/types/genesis.go @@ -0,0 +1,16 @@ +package types + +// DefaultGenesis returns the default Capability genesis state +func DefaultGenesisState() *GenesisState { + return &GenesisState{ + Params: DefaultParams(), + RateLimits: []RateLimit{}, + Epochs: []EpochInfo{NewGenesisEpochInfo(DAY_EPOCH, EpochHourPeriod)}, + } +} + +// Validate performs basic genesis state validation returning an error upon any +// failure. +func ValidateGenesis(data GenesisState) error { + return data.Params.Validate() +} diff --git a/x/ratelimit/types/genesis.pb.go b/x/ratelimit/types/genesis.pb.go new file mode 100644 index 000000000..23b08cf9a --- /dev/null +++ b/x/ratelimit/types/genesis.pb.go @@ -0,0 +1,577 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/genesis.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the ratelimit module's genesis state. 
+type GenesisState struct { + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params" yaml:"params"` + RateLimits []RateLimit `protobuf:"bytes,2,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits" yaml:"rate_limits"` + WhitelistedAddressPairs []WhitelistedAddressPair `protobuf:"bytes,3,rep,name=whitelisted_address_pairs,json=whitelistedAddressPairs,proto3" json:"whitelisted_address_pairs" yaml:"whitelisted_address_pairs"` + PendingSendPacketSequenceNumbers []string `protobuf:"bytes,4,rep,name=pending_send_packet_sequence_numbers,json=pendingSendPacketSequenceNumbers,proto3" json:"pending_send_packet_sequence_numbers,omitempty"` + Epochs []EpochInfo `protobuf:"bytes,5,rep,name=epochs,proto3" json:"epochs"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_9b6ba3f85e177adf, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func (m *GenesisState) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +func (m *GenesisState) GetWhitelistedAddressPairs() []WhitelistedAddressPair { + if m != nil { + return m.WhitelistedAddressPairs + } + return nil +} + +func (m *GenesisState) GetPendingSendPacketSequenceNumbers() []string { + if m != nil { + return m.PendingSendPacketSequenceNumbers + } + return nil +} + +func (m *GenesisState) GetEpochs() []EpochInfo { + if m != nil { + return m.Epochs + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "centauri.ratelimit.v1beta1.GenesisState") +} + +func init() { + proto.RegisterFile("centauri/ratelimit/v1beta1/genesis.proto", fileDescriptor_9b6ba3f85e177adf) +} + +var fileDescriptor_9b6ba3f85e177adf = []byte{ + // 401 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x3f, 0xeb, 0xd3, 0x40, + 0x18, 0xc7, 0x13, 0x53, 0x0b, 0x5e, 0x75, 0x30, 0x28, 0xc6, 0x0c, 0x69, 0x08, 0xfe, 0x09, 0x0a, + 0x09, 0xad, 0x9b, 0x9b, 0x11, 0x11, 0x41, 0x4a, 0x4d, 0x07, 0xc1, 0x25, 0x5c, 0x92, 0xc7, 0xf4, + 0xb0, 0xb9, 0xc4, 0xbb, 0xab, 0xb5, 0xef, 0xc2, 0xd1, 0x97, 0xd4, 0xb1, 0xa3, 0x53, 0x91, 0xf6, + 0x1d, 0x08, 0xee, 0x92, 0xdc, 0xd5, 0x76, 0xf8, 0x35, 0xbf, 0xed, 0x38, 0x3e, 0xdf, 0xcf, 0x97, + 0xe7, 0xee, 0x41, 0x7e, 0x06, 0x54, 0xe0, 0x25, 0x23, 0x21, 0xc3, 0x02, 0x16, 0xa4, 0x24, 0x22, + 0xfc, 0x36, 0x4a, 0x41, 0xe0, 0x51, 0x58, 0x00, 0x05, 0x4e, 0x78, 0x50, 0xb3, 0x4a, 0x54, 0xa6, + 0x7d, 0x24, 0x83, 0xff, 0x64, 0xa0, 0x48, 0xfb, 0x5e, 0x51, 0x15, 0x55, 0x8b, 0x85, 0xcd, 0x49, + 0x26, 0xec, 0xa7, 0x1d, 0xee, 0x1a, 0x33, 0x5c, 0x2a, 0xb5, 0xfd, 0xac, 
0x03, 0x3c, 0x95, 0x49, + 0xf6, 0x49, 0x07, 0x0b, 0x75, 0x95, 0xcd, 0x25, 0xe7, 0xfd, 0x35, 0xd0, 0xed, 0xb7, 0x72, 0x80, + 0x99, 0xc0, 0x02, 0xcc, 0x0f, 0xa8, 0x2f, 0x4b, 0x2d, 0xdd, 0xd5, 0xfd, 0xc1, 0xd8, 0x0b, 0x2e, + 0x0f, 0x14, 0x4c, 0x5b, 0x32, 0xba, 0xbf, 0xd9, 0x0d, 0xb5, 0x3f, 0xbb, 0xe1, 0x9d, 0x35, 0x2e, + 0x17, 0x2f, 0x3d, 0x99, 0xf7, 0x62, 0x25, 0x32, 0x53, 0x34, 0x68, 0xa2, 0x49, 0x9b, 0xe5, 0xd6, + 0x0d, 0xd7, 0xf0, 0x07, 0xe3, 0xc7, 0x5d, 0xde, 0x18, 0x0b, 0x78, 0xdf, 0xdc, 0x44, 0xb6, 0x52, + 0x9b, 0x52, 0x7d, 0xe6, 0xf1, 0x62, 0xc4, 0x8e, 0x18, 0x37, 0x7f, 0xea, 0xe8, 0xe1, 0x6a, 0x4e, + 0x1a, 0x11, 0x17, 0x90, 0x27, 0x38, 0xcf, 0x19, 0x70, 0x9e, 0xd4, 0x98, 0x30, 0x6e, 0x19, 0x6d, + 0xe5, 0xb8, 0xab, 0xf2, 0xe3, 0x29, 0xfc, 0x4a, 0x66, 0xa7, 0x98, 0xb0, 0xc8, 0x57, 0xfd, 0xae, + 0xec, 0xbf, 0x58, 0xe1, 0xc5, 0x0f, 0x56, 0x57, 0x1a, 0xb8, 0x39, 0x41, 0x8f, 0x6a, 0xa0, 0x39, + 0xa1, 0x45, 0xc2, 0x81, 0xe6, 0x49, 0x8d, 0xb3, 0x2f, 0x20, 0x12, 0x0e, 0x5f, 0x97, 0x40, 0x33, + 0x48, 0xe8, 0xb2, 0x4c, 0x81, 0x71, 0xab, 0xe7, 0x1a, 0xfe, 0xad, 0xd8, 0x55, 0xec, 0x0c, 0x68, + 0x3e, 0x6d, 0xc9, 0x99, 0x02, 0x27, 0x92, 0x33, 0x5f, 0xa3, 0x7e, 0xfb, 0x83, 0xdc, 0xba, 0x79, + 0xfd, 0x4b, 0xbe, 0x69, 0xc8, 0x77, 0xf4, 0x73, 0x15, 0xf5, 0x9a, 0x49, 0x62, 0x15, 0x8d, 0x9e, + 0x6f, 0xf6, 0x8e, 0xbe, 0xdd, 0x3b, 0xfa, 0xef, 0xbd, 0xa3, 0xff, 0x38, 0x38, 0xda, 0xf6, 0xe0, + 0x68, 0xbf, 0x0e, 0x8e, 0xf6, 0xe9, 0xee, 0xf7, 0xb3, 0x95, 0x11, 0xeb, 0x1a, 0x78, 0xda, 0x6f, + 0x77, 0xe5, 0xc5, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xf4, 0xbb, 0xa3, 0x8c, 0x06, 0x03, 0x00, + 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Epochs) > 0 { + for iNdEx := len(m.Epochs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Epochs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.PendingSendPacketSequenceNumbers) > 0 { + for iNdEx := len(m.PendingSendPacketSequenceNumbers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PendingSendPacketSequenceNumbers[iNdEx]) + copy(dAtA[i:], m.PendingSendPacketSequenceNumbers[iNdEx]) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.PendingSendPacketSequenceNumbers[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.WhitelistedAddressPairs) > 0 { + for iNdEx := len(m.WhitelistedAddressPairs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.WhitelistedAddressPairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return 
len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.WhitelistedAddressPairs) > 0 { + for _, e := range m.WhitelistedAddressPairs { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.PendingSendPacketSequenceNumbers) > 0 { + for _, s := range m.PendingSendPacketSequenceNumbers { + l = len(s) + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.Epochs) > 0 { + for _, e := range m.Epochs { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhitelistedAddressPairs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WhitelistedAddressPairs = append(m.WhitelistedAddressPairs, WhitelistedAddressPair{}) + if err := m.WhitelistedAddressPairs[len(m.WhitelistedAddressPairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingSendPacketSequenceNumbers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PendingSendPacketSequenceNumbers = append(m.PendingSendPacketSequenceNumbers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Epochs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Epochs = append(m.Epochs, EpochInfo{}) + if err := m.Epochs[len(m.Epochs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, 
fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/keys.go b/x/ratelimit/types/keys.go new file mode 100644 index 000000000..246dcc301 --- /dev/null +++ b/x/ratelimit/types/keys.go @@ -0,0 +1,45 @@ +package types + +import "encoding/binary" + +const ( + ModuleName = "ratelimit" + + // StoreKey defines the primary module store key + StoreKey = ModuleName + + // RouterKey is the message route for slashing + RouterKey = ModuleName + + // QuerierRoute defines the module's query routing key + QuerierRoute = ModuleName +) + +func KeyPrefix(p string) []byte { + return []byte(p) +} + +var ( + PathKeyPrefix = KeyPrefix("path") + RateLimitKeyPrefix = KeyPrefix("rate-limit") + PendingSendPacketPrefix = KeyPrefix("pending-send-packet") + DenomBlacklistKeyPrefix = KeyPrefix("denom-blacklist") + AddressWhitelistKeyPrefix = KeyPrefix("address-blacklist") + EpochKeyPrefix = KeyPrefix("epoch") + + PendingSendPacketChannelLength int = 16 +) + +func GetPendingSendPacketKey(channelId string, sequenceNumber uint64) []byte { + channelIdBz := make([]byte, PendingSendPacketChannelLength) + copy(channelIdBz[:], channelId) + + sequenceNumberBz := make([]byte, 8) + binary.BigEndian.PutUint64(sequenceNumberBz, sequenceNumber) + + return append(channelIdBz, sequenceNumberBz...) +} + +func GetAddressWhitelistKey(sender, receiver string) []byte { + return append(KeyPrefix(sender), KeyPrefix(receiver)...) +} diff --git a/x/ratelimit/types/msg.go b/x/ratelimit/types/msg.go new file mode 100644 index 000000000..caaaf77d6 --- /dev/null +++ b/x/ratelimit/types/msg.go @@ -0,0 +1,244 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" + "cosmossdk.io/math" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + host "github.com/cosmos/ibc-go/v7/modules/core/24-host" +) + +const ( + TypeMsgAddRateLimit = "add_rate_limit" + TypeMsgUpdateRateLimit = "update_rate_limit" + TypeMsgRemoveRateLimit = "remove_rate_limit" + TypeMsgResetRateLimit = "reset_rate_limit" +) + +var _ sdk.Msg = &MsgAddRateLimit{} + +func NewMsgAddRateLimit( + authority string, + denom string, + channelID string, + maxPercentSend math.Int, + maxPercentRecv math.Int, + durationHours uint64, +) *MsgAddRateLimit { + return &MsgAddRateLimit{ + Authority: authority, + Denom: denom, + ChannelId: channelID, + MaxPercentSend: maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: durationHours, + } +} + +// Route Implements Msg. +func (msg MsgAddRateLimit) Route() string { return RouterKey } + +// Type Implements Msg. +func (msg MsgAddRateLimit) Type() string { return TypeMsgAddRateLimit } + +// GetSignBytes implements the LegacyMsg interface. +func (msg MsgAddRateLimit) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners returns the expected signers for a MsgAddParachainIBCTokenInfo message. +func (msg *MsgAddRateLimit) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. 
+func (msg *MsgAddRateLimit) ValidateBasic() error { + // validate authority + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errorsmod.Wrap(err, "invalid authority address") + } + + // validate channelIds + if err := host.ChannelIdentifierValidator(msg.ChannelId); err != nil { + return err + } + + if msg.MaxPercentSend.GT(math.NewInt(100)) || msg.MaxPercentSend.LT(math.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "max-percent-send percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentSend) + } + + if msg.MaxPercentRecv.GT(math.NewInt(100)) || msg.MaxPercentRecv.LT(math.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "max-percent-recv percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentRecv) + } + + if msg.MaxPercentRecv.IsZero() && msg.MaxPercentSend.IsZero() { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "either the max send or max receive threshold must be greater than 0") + } + + if msg.DurationHours == 0 { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "duration can not be zero") + } + + return nil +} + +var _ sdk.Msg = &MsgUpdateRateLimit{} + +func NewMsgUpdateRateLimit( + authority string, + denom string, + channelID string, + maxPercentSend math.Int, + maxPercentRecv math.Int, + durationHours uint64, +) *MsgUpdateRateLimit { + return &MsgUpdateRateLimit{ + Authority: authority, + Denom: denom, + ChannelId: channelID, + MaxPercentSend: maxPercentSend, + MaxPercentRecv: maxPercentRecv, + DurationHours: durationHours, + } +} + +// Route Implements Msg. +func (msg MsgUpdateRateLimit) Route() string { return RouterKey } + +// Type Implements Msg. +func (msg MsgUpdateRateLimit) Type() string { return TypeMsgUpdateRateLimit } + +// GetSignBytes implements the LegacyMsg interface. +func (msg MsgUpdateRateLimit) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners returns the expected signers for a MsgAddParachainIBCTokenInfo message. +func (msg *MsgUpdateRateLimit) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. 
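An illustrative test of the ValidateBasic rules for MsgAddRateLimit above; the gov module address, the "ppica" denom, and the channel are placeholders, and the sketch assumes the default SDK bech32 prefix.

package types_test

import (
	"testing"

	"cosmossdk.io/math"
	authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"

	"github.com/notional-labs/centauri/v3/x/ratelimit/types"
)

func TestMsgAddRateLimitValidateBasicSketch(t *testing.T) {
	authority := authtypes.NewModuleAddress("gov").String()

	// 30%/30% thresholds over a 24-hour window satisfy the 0-100 bounds checked above.
	msg := types.NewMsgAddRateLimit(authority, "ppica", "channel-0", math.NewInt(30), math.NewInt(30), 24)
	if err := msg.ValidateBasic(); err != nil {
		t.Fatalf("expected message to be valid: %v", err)
	}

	// Setting both thresholds to zero is rejected.
	both := types.NewMsgAddRateLimit(authority, "ppica", "channel-0", math.ZeroInt(), math.ZeroInt(), 24)
	if err := both.ValidateBasic(); err == nil {
		t.Fatal("expected an error when both thresholds are zero")
	}

	// A zero-hour duration is also rejected.
	zeroDur := types.NewMsgAddRateLimit(authority, "ppica", "channel-0", math.NewInt(30), math.NewInt(30), 0)
	if err := zeroDur.ValidateBasic(); err == nil {
		t.Fatal("expected an error for a zero duration")
	}
}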
+func (msg *MsgUpdateRateLimit) ValidateBasic() error { + // validate authority + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errorsmod.Wrap(err, "invalid authority address") + } + + // validate channelIds + if err := host.ChannelIdentifierValidator(msg.ChannelId); err != nil { + return err + } + + if msg.MaxPercentSend.GT(math.NewInt(100)) || msg.MaxPercentSend.LT(math.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "max-percent-send percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentSend) + } + + if msg.MaxPercentRecv.GT(math.NewInt(100)) || msg.MaxPercentRecv.LT(math.ZeroInt()) { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "max-percent-recv percent must be between 0 and 100 (inclusively), Provided: %v", msg.MaxPercentRecv) + } + + if msg.MaxPercentRecv.IsZero() && msg.MaxPercentSend.IsZero() { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "either the max send or max receive threshold must be greater than 0") + } + + if msg.DurationHours == 0 { + return errorsmod.Wrapf(sdkerrors.ErrInvalidRequest, "duration can not be zero") + } + + return nil +} + +var _ sdk.Msg = &MsgRemoveRateLimit{} + +func NewMsgRemoveRateLimit( + authority string, + denom string, + channelID string, +) *MsgRemoveRateLimit { + return &MsgRemoveRateLimit{ + Authority: authority, + Denom: denom, + ChannelId: channelID, + } +} + +// Route Implements Msg. +func (msg MsgRemoveRateLimit) Route() string { return RouterKey } + +// Type Implements Msg. +func (msg MsgRemoveRateLimit) Type() string { return TypeMsgRemoveRateLimit } + +// GetSignBytes implements the LegacyMsg interface. +func (msg MsgRemoveRateLimit) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners returns the expected signers for a MsgAddParachainIBCTokenInfo message. +func (msg *MsgRemoveRateLimit) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. +func (msg *MsgRemoveRateLimit) ValidateBasic() error { + // validate authority + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errorsmod.Wrap(err, "invalid authority address") + } + + // validate channelIds + if err := host.ChannelIdentifierValidator(msg.ChannelId); err != nil { + return err + } + + return nil +} + +var _ sdk.Msg = &MsgResetRateLimit{} + +func NewMsgResetRateLimit( + authority string, + denom string, + channelID string, +) *MsgResetRateLimit { + return &MsgResetRateLimit{ + Authority: authority, + Denom: denom, + ChannelId: channelID, + } +} + +// Route Implements Msg. +func (msg MsgResetRateLimit) Route() string { return RouterKey } + +// Type Implements Msg. +func (msg MsgResetRateLimit) Type() string { return TypeMsgResetRateLimit } + +// GetSignBytes implements the LegacyMsg interface. +func (msg MsgResetRateLimit) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners returns the expected signers for a MsgAddParachainIBCTokenInfo message. +func (msg *MsgResetRateLimit) GetSigners() []sdk.AccAddress { + addr, _ := sdk.AccAddressFromBech32(msg.Authority) + return []sdk.AccAddress{addr} +} + +// ValidateBasic does a sanity check on the provided data. 
+func (msg *MsgResetRateLimit) ValidateBasic() error { + // validate authority + if _, err := sdk.AccAddressFromBech32(msg.Authority); err != nil { + return errorsmod.Wrap(err, "invalid authority address") + } + + // validate channelIds + if err := host.ChannelIdentifierValidator(msg.ChannelId); err != nil { + return err + } + + return nil +} diff --git a/x/ratelimit/types/params.go b/x/ratelimit/types/params.go new file mode 100644 index 000000000..4f3215e35 --- /dev/null +++ b/x/ratelimit/types/params.go @@ -0,0 +1,32 @@ +package types + +import ( + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +var _ paramtypes.ParamSet = (*Params)(nil) + +// ParamKeyTable the param key table for launch module +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +// NewParams creates a new Params instance +func NewParams() Params { + return Params{} +} + +// DefaultParams returns a default set of parameters +func DefaultParams() Params { + return NewParams() +} + +// ParamSetPairs get the params.ParamSet +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{} +} + +// Validate validates the set of params +func (p Params) Validate() error { + return nil +} diff --git a/x/ratelimit/types/params.pb.go b/x/ratelimit/types/params.pb.go new file mode 100644 index 000000000..bc9bf28dc --- /dev/null +++ b/x/ratelimit/types/params.pb.go @@ -0,0 +1,268 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/params.proto + +package types + +import ( + fmt "fmt" + _ "github.com/cosmos/cosmos-proto" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params holds parameters for the mint module. 
+type Params struct { +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_1b21f4042f9a3cfb, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Params)(nil), "centauri.ratelimit.v1beta1.Params") +} + +func init() { + proto.RegisterFile("centauri/ratelimit/v1beta1/params.proto", fileDescriptor_1b21f4042f9a3cfb) +} + +var fileDescriptor_1b21f4042f9a3cfb = []byte{ + // 152 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4f, 0x4e, 0xcd, 0x2b, + 0x49, 0x2c, 0x2d, 0xca, 0xd4, 0x2f, 0x4a, 0x2c, 0x49, 0xcd, 0xc9, 0xcc, 0xcd, 0x2c, 0xd1, 0x2f, + 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd4, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x82, 0x29, 0xd4, 0x83, 0x2b, 0xd4, 0x83, 0x2a, 0x94, 0x12, 0x49, + 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd3, 0x07, 0xb1, 0x20, 0x3a, 0xa4, 0x24, 0x93, 0xf3, 0x8b, 0x73, + 0xf3, 0x8b, 0xe3, 0x21, 0x12, 0x10, 0x0e, 0x44, 0x4a, 0x89, 0x83, 0x8b, 0x2d, 0x00, 0x6c, 0xb8, + 0x93, 0xf6, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, + 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0x09, 0x56, 0x20, 0x39, + 0xa9, 0xa4, 0xb2, 0x20, 0xb5, 0x38, 0x89, 0x0d, 0xac, 0xdb, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, + 0x91, 0xb5, 0x1b, 0x71, 0xb5, 0x00, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/query.pb.go b/x/ratelimit/types/query.pb.go new file mode 100644 index 000000000..9c9dcb3d8 --- /dev/null +++ b/x/ratelimit/types/query.pb.go @@ -0,0 +1,2108 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/query.proto + +package types + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type QueryAllRateLimitsRequest struct { +} + +func (m *QueryAllRateLimitsRequest) Reset() { *m = QueryAllRateLimitsRequest{} } +func (m *QueryAllRateLimitsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllRateLimitsRequest) ProtoMessage() {} +func (*QueryAllRateLimitsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{0} +} +func (m *QueryAllRateLimitsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllRateLimitsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllRateLimitsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllRateLimitsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllRateLimitsRequest.Merge(m, src) +} +func (m *QueryAllRateLimitsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllRateLimitsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllRateLimitsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllRateLimitsRequest proto.InternalMessageInfo + +type QueryAllRateLimitsResponse struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryAllRateLimitsResponse) Reset() { *m = QueryAllRateLimitsResponse{} } +func (m *QueryAllRateLimitsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllRateLimitsResponse) ProtoMessage() {} +func (*QueryAllRateLimitsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{1} +} +func (m *QueryAllRateLimitsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllRateLimitsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllRateLimitsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllRateLimitsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllRateLimitsResponse.Merge(m, src) +} +func (m *QueryAllRateLimitsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllRateLimitsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllRateLimitsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllRateLimitsResponse proto.InternalMessageInfo + +func (m *QueryAllRateLimitsResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +type QueryRateLimitRequest struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *QueryRateLimitRequest) Reset() { *m = QueryRateLimitRequest{} } +func (m *QueryRateLimitRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitRequest) ProtoMessage() {} +func (*QueryRateLimitRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{2} +} +func (m *QueryRateLimitRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + 
return xxx_messageInfo_QueryRateLimitRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitRequest.Merge(m, src) +} +func (m *QueryRateLimitRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitRequest proto.InternalMessageInfo + +func (m *QueryRateLimitRequest) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *QueryRateLimitRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type QueryRateLimitResponse struct { + RateLimit *RateLimit `protobuf:"bytes,1,opt,name=rate_limit,json=rateLimit,proto3" json:"rate_limit,omitempty"` +} + +func (m *QueryRateLimitResponse) Reset() { *m = QueryRateLimitResponse{} } +func (m *QueryRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitResponse) ProtoMessage() {} +func (*QueryRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{3} +} +func (m *QueryRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitResponse.Merge(m, src) +} +func (m *QueryRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitResponse proto.InternalMessageInfo + +func (m *QueryRateLimitResponse) GetRateLimit() *RateLimit { + if m != nil { + return m.RateLimit + } + return nil +} + +type QueryRateLimitsByChainIdRequest struct { + ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` +} + +func (m *QueryRateLimitsByChainIdRequest) Reset() { *m = QueryRateLimitsByChainIdRequest{} } +func (m *QueryRateLimitsByChainIdRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChainIdRequest) ProtoMessage() {} +func (*QueryRateLimitsByChainIdRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{4} +} +func (m *QueryRateLimitsByChainIdRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChainIdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChainIdRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChainIdRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChainIdRequest.Merge(m, src) +} +func (m *QueryRateLimitsByChainIdRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChainIdRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_QueryRateLimitsByChainIdRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChainIdRequest proto.InternalMessageInfo + +func (m *QueryRateLimitsByChainIdRequest) GetChainId() string { + if m != nil { + return m.ChainId + } + return "" +} + +type QueryRateLimitsByChainIdResponse struct { + RateLimits []RateLimit `protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryRateLimitsByChainIdResponse) Reset() { *m = QueryRateLimitsByChainIdResponse{} } +func (m *QueryRateLimitsByChainIdResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChainIdResponse) ProtoMessage() {} +func (*QueryRateLimitsByChainIdResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{5} +} +func (m *QueryRateLimitsByChainIdResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChainIdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChainIdResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChainIdResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChainIdResponse.Merge(m, src) +} +func (m *QueryRateLimitsByChainIdResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChainIdResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChainIdResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChainIdResponse proto.InternalMessageInfo + +func (m *QueryRateLimitsByChainIdResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +type QueryRateLimitsByChannelIdRequest struct { + ChannelId string `protobuf:"bytes,1,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *QueryRateLimitsByChannelIdRequest) Reset() { *m = QueryRateLimitsByChannelIdRequest{} } +func (m *QueryRateLimitsByChannelIdRequest) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChannelIdRequest) ProtoMessage() {} +func (*QueryRateLimitsByChannelIdRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{6} +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChannelIdRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChannelIdRequest.Merge(m, src) +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChannelIdRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChannelIdRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChannelIdRequest proto.InternalMessageInfo + +func (m *QueryRateLimitsByChannelIdRequest) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type QueryRateLimitsByChannelIdResponse struct { + RateLimits []RateLimit 
`protobuf:"bytes,1,rep,name=rate_limits,json=rateLimits,proto3" json:"rate_limits"` +} + +func (m *QueryRateLimitsByChannelIdResponse) Reset() { *m = QueryRateLimitsByChannelIdResponse{} } +func (m *QueryRateLimitsByChannelIdResponse) String() string { return proto.CompactTextString(m) } +func (*QueryRateLimitsByChannelIdResponse) ProtoMessage() {} +func (*QueryRateLimitsByChannelIdResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{7} +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryRateLimitsByChannelIdResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryRateLimitsByChannelIdResponse.Merge(m, src) +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryRateLimitsByChannelIdResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryRateLimitsByChannelIdResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryRateLimitsByChannelIdResponse proto.InternalMessageInfo + +func (m *QueryRateLimitsByChannelIdResponse) GetRateLimits() []RateLimit { + if m != nil { + return m.RateLimits + } + return nil +} + +type QueryAllWhitelistedAddressesRequest struct { +} + +func (m *QueryAllWhitelistedAddressesRequest) Reset() { *m = QueryAllWhitelistedAddressesRequest{} } +func (m *QueryAllWhitelistedAddressesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllWhitelistedAddressesRequest) ProtoMessage() {} +func (*QueryAllWhitelistedAddressesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{8} +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllWhitelistedAddressesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllWhitelistedAddressesRequest.Merge(m, src) +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllWhitelistedAddressesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllWhitelistedAddressesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllWhitelistedAddressesRequest proto.InternalMessageInfo + +type QueryAllWhitelistedAddressesResponse struct { + AddressPairs []WhitelistedAddressPair `protobuf:"bytes,1,rep,name=address_pairs,json=addressPairs,proto3" json:"address_pairs"` +} + +func (m *QueryAllWhitelistedAddressesResponse) Reset() { *m = QueryAllWhitelistedAddressesResponse{} } +func (m *QueryAllWhitelistedAddressesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryAllWhitelistedAddressesResponse) ProtoMessage() {} +func (*QueryAllWhitelistedAddressesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b0fc123b957ea496, []int{9} +} +func (m *QueryAllWhitelistedAddressesResponse) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllWhitelistedAddressesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllWhitelistedAddressesResponse.Merge(m, src) +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryAllWhitelistedAddressesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllWhitelistedAddressesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllWhitelistedAddressesResponse proto.InternalMessageInfo + +func (m *QueryAllWhitelistedAddressesResponse) GetAddressPairs() []WhitelistedAddressPair { + if m != nil { + return m.AddressPairs + } + return nil +} + +func init() { + proto.RegisterType((*QueryAllRateLimitsRequest)(nil), "centauri.ratelimit.v1beta1.QueryAllRateLimitsRequest") + proto.RegisterType((*QueryAllRateLimitsResponse)(nil), "centauri.ratelimit.v1beta1.QueryAllRateLimitsResponse") + proto.RegisterType((*QueryRateLimitRequest)(nil), "centauri.ratelimit.v1beta1.QueryRateLimitRequest") + proto.RegisterType((*QueryRateLimitResponse)(nil), "centauri.ratelimit.v1beta1.QueryRateLimitResponse") + proto.RegisterType((*QueryRateLimitsByChainIdRequest)(nil), "centauri.ratelimit.v1beta1.QueryRateLimitsByChainIdRequest") + proto.RegisterType((*QueryRateLimitsByChainIdResponse)(nil), "centauri.ratelimit.v1beta1.QueryRateLimitsByChainIdResponse") + proto.RegisterType((*QueryRateLimitsByChannelIdRequest)(nil), "centauri.ratelimit.v1beta1.QueryRateLimitsByChannelIdRequest") + proto.RegisterType((*QueryRateLimitsByChannelIdResponse)(nil), "centauri.ratelimit.v1beta1.QueryRateLimitsByChannelIdResponse") + proto.RegisterType((*QueryAllWhitelistedAddressesRequest)(nil), "centauri.ratelimit.v1beta1.QueryAllWhitelistedAddressesRequest") + proto.RegisterType((*QueryAllWhitelistedAddressesResponse)(nil), "centauri.ratelimit.v1beta1.QueryAllWhitelistedAddressesResponse") +} + +func init() { + proto.RegisterFile("centauri/ratelimit/v1beta1/query.proto", fileDescriptor_b0fc123b957ea496) +} + +var fileDescriptor_b0fc123b957ea496 = []byte{ + // 612 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x95, 0x4f, 0x6f, 0x12, 0x41, + 0x18, 0xc6, 0x99, 0x6a, 0xad, 0xbc, 0xb5, 0x07, 0xc7, 0x56, 0xe9, 0xaa, 0x5b, 0x5c, 0x6d, 0x53, + 0x25, 0xb2, 0x01, 0x52, 0x2f, 0xad, 0x7f, 0x8a, 0x5e, 0x9a, 0x70, 0x50, 0x2e, 0x26, 0x26, 0x4a, + 0x06, 0x76, 0x02, 0x6b, 0xe8, 0xce, 0x76, 0x67, 0x50, 0x89, 0xe9, 0xc5, 0xc4, 0xbb, 0x89, 0xdf, + 0xc1, 0xab, 0x1f, 0xc2, 0x0b, 0x89, 0x07, 0x9b, 0x78, 0xf1, 0x64, 0x0c, 0xf8, 0x41, 0xcc, 0xce, + 0xce, 0x2e, 0x42, 0x61, 0x0b, 0x24, 0xbd, 0x2d, 0x33, 0xef, 0xf3, 0xcc, 0xef, 0x7d, 0x77, 0x9e, + 0x05, 0x36, 0x6a, 0xd4, 0x11, 0xa4, 0xe5, 0xd9, 0xa6, 0x47, 0x04, 0x6d, 0xda, 0xfb, 0xb6, 0x30, + 0xdf, 0xe4, 0xaa, 0x54, 0x90, 0x9c, 0x79, 0xd0, 0xa2, 0x5e, 0x3b, 0xeb, 0x7a, 0x4c, 0x30, 0xac, + 0x85, 0x75, 0xd9, 0xa8, 0x2e, 0xab, 0xea, 0xb4, 0x3b, 0x31, 0x1e, 0xfd, 0x6a, 0xe9, 0xa3, 0x5d, + 0xab, 0x33, 0x56, 0x6f, 0x52, 0x93, 0xb8, 0xb6, 0x49, 0x1c, 0x87, 0x09, 0x22, 0x6c, 0xe6, 0x70, + 0xb5, 0xbb, 0x5c, 0x67, 0x75, 0x26, 0x1f, 0x4d, 0xff, 0x29, 0x58, 0x35, 0xae, 0xc2, 0xea, 
0x33, + 0x1f, 0x65, 0xb7, 0xd9, 0x2c, 0x13, 0x41, 0x4b, 0xbe, 0x1d, 0x2f, 0xd3, 0x83, 0x16, 0xe5, 0xc2, + 0x78, 0x0d, 0xda, 0xa8, 0x4d, 0xee, 0x32, 0x87, 0x53, 0x5c, 0x82, 0x45, 0x9f, 0xa0, 0x22, 0x11, + 0x78, 0x0a, 0xa5, 0xcf, 0x6c, 0x2e, 0xe6, 0xd7, 0xb3, 0xe3, 0x9b, 0xc9, 0x46, 0x26, 0xc5, 0xb3, + 0x9d, 0xdf, 0x6b, 0x89, 0x32, 0x78, 0x91, 0xab, 0x51, 0x82, 0x15, 0x79, 0x56, 0x54, 0xa3, 0x20, + 0xf0, 0x32, 0xcc, 0x5b, 0xd4, 0x61, 0xfb, 0x29, 0x94, 0x46, 0x9b, 0xc9, 0x72, 0xf0, 0x03, 0x5f, + 0x07, 0xa8, 0x35, 0x88, 0xe3, 0xd0, 0x66, 0xc5, 0xb6, 0x52, 0x73, 0x72, 0x2b, 0xa9, 0x56, 0xf6, + 0x2c, 0xe3, 0x15, 0x5c, 0x1e, 0x76, 0x53, 0xd4, 0x4f, 0x00, 0xfa, 0xd4, 0xd2, 0x73, 0x52, 0xe8, + 0x72, 0x32, 0xc2, 0x35, 0x76, 0x60, 0x6d, 0xd0, 0x9f, 0x17, 0xdb, 0x8f, 0x1b, 0xc4, 0x76, 0xf6, + 0xac, 0x90, 0x7b, 0x15, 0xce, 0xd7, 0xfc, 0x15, 0x9f, 0x2f, 0x40, 0x5f, 0xa8, 0x05, 0x15, 0x86, + 0x0b, 0xe9, 0xf1, 0xea, 0x53, 0x99, 0x6e, 0x11, 0x6e, 0x8c, 0x3a, 0x31, 0x98, 0x56, 0x48, 0x3c, + 0x38, 0x53, 0x34, 0x3c, 0x53, 0x0f, 0x8c, 0x38, 0x8f, 0x53, 0xe1, 0x5e, 0x87, 0x9b, 0xe1, 0x0d, + 0x7c, 0xde, 0xb0, 0x7d, 0x25, 0x17, 0xd4, 0xda, 0xb5, 0x2c, 0x8f, 0x72, 0x4e, 0xa3, 0x8b, 0xfa, + 0x11, 0xc1, 0xad, 0xf8, 0x3a, 0x45, 0xf7, 0x12, 0x96, 0x48, 0xb0, 0x58, 0x71, 0x89, 0xed, 0x85, + 0x7c, 0xf9, 0x38, 0xbe, 0xe3, 0x86, 0x4f, 0x89, 0xed, 0x29, 0xd8, 0x0b, 0xa4, 0xbf, 0xc4, 0xf3, + 0x9d, 0x05, 0x98, 0x97, 0x1c, 0xf8, 0x0b, 0x82, 0xa5, 0x81, 0xd8, 0xe0, 0xad, 0xb8, 0x33, 0xc6, + 0x66, 0x50, 0xbb, 0x37, 0xad, 0x2c, 0xe8, 0xd4, 0xd8, 0xf8, 0xf0, 0xf3, 0xef, 0xe7, 0xb9, 0x34, + 0xd6, 0xcd, 0x11, 0x5f, 0x90, 0xe8, 0x89, 0xe3, 0xaf, 0x08, 0x92, 0x91, 0x1c, 0xe7, 0x4e, 0x3c, + 0x6d, 0x38, 0x9f, 0x5a, 0x7e, 0x1a, 0x89, 0x82, 0xdb, 0x96, 0x70, 0x5b, 0xb8, 0x10, 0x0b, 0x67, + 0xbe, 0xef, 0x5f, 0xc7, 0x43, 0xb3, 0xda, 0xae, 0x04, 0xd1, 0xff, 0x86, 0xe0, 0xd2, 0x88, 0xe4, + 0xe0, 0xed, 0xc9, 0x41, 0x8e, 0xa5, 0x55, 0xdb, 0x99, 0x4d, 0xac, 0xfa, 0xc9, 0xc9, 0x7e, 0x32, + 0xf8, 0x76, 0xfc, 0xb0, 0x65, 0x43, 0xf2, 0x8b, 0x70, 0x88, 0xbf, 0x23, 0x58, 0x19, 0x99, 0x24, + 0x7c, 0x7f, 0x5a, 0x94, 0x81, 0x14, 0x6b, 0x0f, 0x66, 0x95, 0xab, 0x5e, 0x0a, 0xb2, 0x97, 0xbb, + 0x38, 0x33, 0x41, 0x2f, 0xe1, 0xcb, 0xc1, 0x3f, 0x10, 0x5c, 0x19, 0x93, 0x3d, 0xfc, 0x70, 0x92, + 0x1b, 0x1c, 0x93, 0x6e, 0xed, 0xd1, 0xec, 0x06, 0x93, 0xbc, 0x9f, 0xb7, 0x7d, 0x65, 0x85, 0x84, + 0xd2, 0x62, 0xa6, 0xd3, 0xd5, 0xd1, 0x51, 0x57, 0x47, 0x7f, 0xba, 0x3a, 0xfa, 0xd4, 0xd3, 0x13, + 0x47, 0x3d, 0x3d, 0xf1, 0xab, 0xa7, 0x27, 0x5e, 0x5c, 0x7c, 0xf7, 0x9f, 0x58, 0xb4, 0x5d, 0xca, + 0xab, 0xe7, 0xe4, 0x9f, 0x69, 0xe1, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x90, 0x72, 0x96, 0x2b, + 0xf2, 0x07, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type QueryClient interface { + AllRateLimits(ctx context.Context, in *QueryAllRateLimitsRequest, opts ...grpc.CallOption) (*QueryAllRateLimitsResponse, error) + RateLimit(ctx context.Context, in *QueryRateLimitRequest, opts ...grpc.CallOption) (*QueryRateLimitResponse, error) + RateLimitsByChainId(ctx context.Context, in *QueryRateLimitsByChainIdRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChainIdResponse, error) + RateLimitsByChannelId(ctx context.Context, in *QueryRateLimitsByChannelIdRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChannelIdResponse, error) + AllWhitelistedAddresses(ctx context.Context, in *QueryAllWhitelistedAddressesRequest, opts ...grpc.CallOption) (*QueryAllWhitelistedAddressesResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) AllRateLimits(ctx context.Context, in *QueryAllRateLimitsRequest, opts ...grpc.CallOption) (*QueryAllRateLimitsResponse, error) { + out := new(QueryAllRateLimitsResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Query/AllRateLimits", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimit(ctx context.Context, in *QueryRateLimitRequest, opts ...grpc.CallOption) (*QueryRateLimitResponse, error) { + out := new(QueryRateLimitResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Query/RateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimitsByChainId(ctx context.Context, in *QueryRateLimitsByChainIdRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChainIdResponse, error) { + out := new(QueryRateLimitsByChainIdResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Query/RateLimitsByChainId", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) RateLimitsByChannelId(ctx context.Context, in *QueryRateLimitsByChannelIdRequest, opts ...grpc.CallOption) (*QueryRateLimitsByChannelIdResponse, error) { + out := new(QueryRateLimitsByChannelIdResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Query/RateLimitsByChannelId", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) AllWhitelistedAddresses(ctx context.Context, in *QueryAllWhitelistedAddressesRequest, opts ...grpc.CallOption) (*QueryAllWhitelistedAddressesResponse, error) { + out := new(QueryAllWhitelistedAddressesResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Query/AllWhitelistedAddresses", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + AllRateLimits(context.Context, *QueryAllRateLimitsRequest) (*QueryAllRateLimitsResponse, error) + RateLimit(context.Context, *QueryRateLimitRequest) (*QueryRateLimitResponse, error) + RateLimitsByChainId(context.Context, *QueryRateLimitsByChainIdRequest) (*QueryRateLimitsByChainIdResponse, error) + RateLimitsByChannelId(context.Context, *QueryRateLimitsByChannelIdRequest) (*QueryRateLimitsByChannelIdResponse, error) + AllWhitelistedAddresses(context.Context, *QueryAllWhitelistedAddressesRequest) (*QueryAllWhitelistedAddressesResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
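A minimal sketch of querying the module through the generated QueryClient above; the endpoint, denom, and channel are placeholders (9090 is only the conventional Cosmos SDK gRPC port).

package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/notional-labs/centauri/v3/x/ratelimit/types"
)

func main() {
	// Dial a node's gRPC endpoint (placeholder address).
	conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := types.NewQueryClient(conn)

	// Look up the rate limit configured for a single denom/channel pair.
	resp, err := client.RateLimit(context.Background(), &types.QueryRateLimitRequest{
		Denom:     "ppica",
		ChannelId: "channel-0",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.GetRateLimit())
}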
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) AllRateLimits(ctx context.Context, req *QueryAllRateLimitsRequest) (*QueryAllRateLimitsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllRateLimits not implemented") +} +func (*UnimplementedQueryServer) RateLimit(ctx context.Context, req *QueryRateLimitRequest) (*QueryRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimit not implemented") +} +func (*UnimplementedQueryServer) RateLimitsByChainId(ctx context.Context, req *QueryRateLimitsByChainIdRequest) (*QueryRateLimitsByChainIdResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimitsByChainId not implemented") +} +func (*UnimplementedQueryServer) RateLimitsByChannelId(ctx context.Context, req *QueryRateLimitsByChannelIdRequest) (*QueryRateLimitsByChannelIdResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RateLimitsByChannelId not implemented") +} +func (*UnimplementedQueryServer) AllWhitelistedAddresses(ctx context.Context, req *QueryAllWhitelistedAddressesRequest) (*QueryAllWhitelistedAddressesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllWhitelistedAddresses not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_AllRateLimits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllRateLimitsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllRateLimits(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Query/AllRateLimits", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllRateLimits(ctx, req.(*QueryAllRateLimitsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Query/RateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimit(ctx, req.(*QueryRateLimitRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimitsByChainId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitsByChainIdRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimitsByChainId(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Query/RateLimitsByChainId", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimitsByChainId(ctx, req.(*QueryRateLimitsByChainIdRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_RateLimitsByChannelId_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryRateLimitsByChannelIdRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).RateLimitsByChannelId(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Query/RateLimitsByChannelId", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).RateLimitsByChannelId(ctx, req.(*QueryRateLimitsByChannelIdRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_AllWhitelistedAddresses_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllWhitelistedAddressesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllWhitelistedAddresses(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Query/AllWhitelistedAddresses", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllWhitelistedAddresses(ctx, req.(*QueryAllWhitelistedAddressesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "centauri.ratelimit.v1beta1.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AllRateLimits", + Handler: _Query_AllRateLimits_Handler, + }, + { + MethodName: "RateLimit", + Handler: _Query_RateLimit_Handler, + }, + { + MethodName: "RateLimitsByChainId", + Handler: _Query_RateLimitsByChainId_Handler, + }, + { + MethodName: "RateLimitsByChannelId", + Handler: _Query_RateLimitsByChannelId_Handler, + }, + { + MethodName: "AllWhitelistedAddresses", + Handler: _Query_AllWhitelistedAddresses_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "centauri/ratelimit/v1beta1/query.proto", +} + +func (m *QueryAllRateLimitsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllRateLimitsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllRateLimitsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryAllRateLimitsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllRateLimitsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllRateLimitsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RateLimit != nil { + { + size, err := m.RateLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChainIdRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChainIdRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChainIdRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChainId) > 0 { + i -= len(m.ChainId) + copy(dAtA[i:], m.ChainId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChainId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChainIdResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChainIdResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChainIdResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChannelIdRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChannelIdRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChannelIdRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintQuery(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryRateLimitsByChannelIdResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryRateLimitsByChannelIdResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryRateLimitsByChannelIdResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.RateLimits) > 0 { + for iNdEx := len(m.RateLimits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RateLimits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryAllWhitelistedAddressesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllWhitelistedAddressesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllWhitelistedAddressesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *QueryAllWhitelistedAddressesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllWhitelistedAddressesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllWhitelistedAddressesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AddressPairs) > 0 { + for iNdEx := len(m.AddressPairs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AddressPairs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryAllRateLimitsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryAllRateLimitsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryRateLimitRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RateLimit != nil { + l = 
m.RateLimit.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitsByChainIdRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitsByChainIdResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryRateLimitsByChannelIdRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryRateLimitsByChannelIdResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RateLimits) > 0 { + for _, e := range m.RateLimits { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func (m *QueryAllWhitelistedAddressesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *QueryAllWhitelistedAddressesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AddressPairs) > 0 { + for _, e := range m.AddressPairs { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryAllRateLimitsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllRateLimitsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllRateLimitsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllRateLimitsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllRateLimitsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllRateLimitsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RateLimit == nil { + m.RateLimit = &RateLimit{} + } + if err := m.RateLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChainIdRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChainIdRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChainIdRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChainIdResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChainIdResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChainIdResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChannelIdRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChannelIdRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChannelIdRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryRateLimitsByChannelIdResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryRateLimitsByChannelIdResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryRateLimitsByChannelIdResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RateLimits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RateLimits = append(m.RateLimits, RateLimit{}) + if err := m.RateLimits[len(m.RateLimits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllWhitelistedAddressesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllWhitelistedAddressesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllWhitelistedAddressesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllWhitelistedAddressesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllWhitelistedAddressesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllWhitelistedAddressesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AddressPairs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AddressPairs = append(m.AddressPairs, WhitelistedAddressPair{}) + if err := m.AddressPairs[len(m.AddressPairs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/query.pb.gw.go b/x/ratelimit/types/query.pb.gw.go new file mode 100644 index 000000000..95020deff --- /dev/null +++ b/x/ratelimit/types/query.pb.gw.go @@ -0,0 +1,539 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/query.proto + +/* +Package types is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package types + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_Query_AllRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllRateLimitsRequest + var metadata runtime.ServerMetadata + + msg, err := client.AllRateLimits(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AllRateLimits_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllRateLimitsRequest + var metadata runtime.ServerMetadata + + msg, err := server.AllRateLimits(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_RateLimit_0 = &utilities.DoubleArray{Encoding: map[string]int{"channel_id": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_RateLimit_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RateLimit_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.RateLimit(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_RateLimit_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if 
err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_RateLimit_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.RateLimit(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_RateLimitsByChainId_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChainIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + msg, err := client.RateLimitsByChainId(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_RateLimitsByChainId_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChainIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["chain_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "chain_id") + } + + protoReq.ChainId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "chain_id", err) + } + + msg, err := server.RateLimitsByChainId(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_RateLimitsByChannelId_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChannelIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + msg, err := client.RateLimitsByChannelId(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_RateLimitsByChannelId_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryRateLimitsByChannelIdRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["channel_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "channel_id") + } + + protoReq.ChannelId, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "channel_id", err) + } + + msg, err := server.RateLimitsByChannelId(ctx, 
&protoReq) + return msg, metadata, err + +} + +func request_Query_AllWhitelistedAddresses_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllWhitelistedAddressesRequest + var metadata runtime.ServerMetadata + + msg, err := client.AllWhitelistedAddresses(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AllWhitelistedAddresses_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllWhitelistedAddressesRequest + var metadata runtime.ServerMetadata + + msg, err := server.AllWhitelistedAddresses(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_AllRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllRateLimits_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllRateLimits_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimit_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChainId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimitsByChainId_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChainId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChannelId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_RateLimitsByChannelId_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChannelId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_AllWhitelistedAddresses_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllWhitelistedAddresses_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllWhitelistedAddresses_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) 
+ if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_AllRateLimits_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllRateLimits_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllRateLimits_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimit_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimit_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimit_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChainId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimitsByChainId_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChainId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_RateLimitsByChannelId_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_RateLimitsByChannelId_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_RateLimitsByChannelId_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_AllWhitelistedAddresses_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllWhitelistedAddresses_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllWhitelistedAddresses_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_AllRateLimits_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"centauri", "ratelimit", "ratelimits"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimit_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 1, 1, 0, 4, 1, 5, 2, 2, 3}, []string{"centauri", "ratelimit", "channel_id", "by_denom"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimitsByChainId_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"centauri", "ratelimit", "ratelimits", "chain_id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_RateLimitsByChannelId_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"centauri", "ratelimit", "ratelimits", "channel_id"}, "", runtime.AssumeColonVerbOpt(false))) + + pattern_Query_AllWhitelistedAddresses_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"centauri", "ratelimit", "whitelisted_addresses"}, "", runtime.AssumeColonVerbOpt(false))) +) + +var ( + forward_Query_AllRateLimits_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimit_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimitsByChainId_0 = runtime.ForwardResponseMessage + + forward_Query_RateLimitsByChannelId_0 = runtime.ForwardResponseMessage + + forward_Query_AllWhitelistedAddresses_0 = runtime.ForwardResponseMessage +) diff --git a/x/ratelimit/types/quota.go b/x/ratelimit/types/quota.go new file mode 100644 index 000000000..8bfa24581 --- /dev/null +++ b/x/ratelimit/types/quota.go @@ -0,0 +1,22 @@ +package types + +import ( + "cosmossdk.io/math" +) + +// CheckExceedsQuota checks whether a new in/out flow would exceed the max in/out quota +func (q *Quota) CheckExceedsQuota(direction PacketDirection, amount math.Int, totalValue math.Int) bool { + // If there's no channel value (this should be almost impossible), it means there is no + // supply of the asset, so we shouldn't prevent inflows/outflows + if totalValue.IsZero() { + return false + } + var threshold math.Int + if direction == PACKET_RECV { + threshold = totalValue.Mul(q.MaxPercentRecv).Quo(math.NewInt(100)) + } else { + threshold = totalValue.Mul(q.MaxPercentSend).Quo(math.NewInt(100)) + } + + return amount.GT(threshold) +} diff --git a/x/ratelimit/types/ratelimit.pb.go b/x/ratelimit/types/ratelimit.pb.go new file mode 100644 index 000000000..af9923387 --- /dev/null +++ b/x/ratelimit/types/ratelimit.pb.go @@ -0,0 +1,1446 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/ratelimit.proto + +package types + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated.
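The hand-written CheckExceedsQuota in quota.go above boils down to threshold = channelValue * MaxPercent / 100 (integer division) followed by a strict greater-than check. A small sketch with made-up numbers illustrates that arithmetic; the percentages and amounts are examples only, not values used by the module.

// Sketch only: exercising CheckExceedsQuota with made-up numbers.
// With a channel value of 1000 and MaxPercentRecv of 10, the receive
// threshold is 1000*10/100 = 100, so an inflow of 150 exceeds the quota
// while an inflow of exactly 100 does not (the comparison is strict).
package main

import (
	"fmt"

	"cosmossdk.io/math"

	ratelimittypes "github.com/notional-labs/centauri/v3/x/ratelimit/types"
)

func main() {
	q := ratelimittypes.Quota{
		MaxPercentSend: math.NewInt(20),
		MaxPercentRecv: math.NewInt(10),
		DurationHours:  24,
	}
	channelValue := math.NewInt(1000)

	fmt.Println(q.CheckExceedsQuota(ratelimittypes.PACKET_RECV, math.NewInt(150), channelValue)) // true
	fmt.Println(q.CheckExceedsQuota(ratelimittypes.PACKET_RECV, math.NewInt(100), channelValue)) // false
	fmt.Println(q.CheckExceedsQuota(ratelimittypes.PACKET_SEND, math.NewInt(250), channelValue)) // true, 250 > 200
}

Because Quo truncates, the threshold rounds down, which makes the limit marginally stricter when the channel value is not a multiple of 100.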
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type PacketDirection int32 + +const ( + PACKET_SEND PacketDirection = 0 + PACKET_RECV PacketDirection = 1 +) + +var PacketDirection_name = map[int32]string{ + 0: "PACKET_SEND", + 1: "PACKET_RECV", +} + +var PacketDirection_value = map[string]int32{ + "PACKET_SEND": 0, + "PACKET_RECV": 1, +} + +func (x PacketDirection) String() string { + return proto.EnumName(PacketDirection_name, int32(x)) +} + +func (PacketDirection) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_825b72046a6cedeb, []int{0} +} + +type Path struct { + Denom string `protobuf:"bytes,1,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} +func (*Path) Descriptor() ([]byte, []int) { + return fileDescriptor_825b72046a6cedeb, []int{0} +} +func (m *Path) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Path.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Path) XXX_Merge(src proto.Message) { + xxx_messageInfo_Path.Merge(m, src) +} +func (m *Path) XXX_Size() int { + return m.Size() +} +func (m *Path) XXX_DiscardUnknown() { + xxx_messageInfo_Path.DiscardUnknown(m) +} + +var xxx_messageInfo_Path proto.InternalMessageInfo + +func (m *Path) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *Path) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type Quota struct { + MaxPercentSend github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,1,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_send"` + MaxPercentRecv github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,2,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_recv"` + DurationHours uint64 `protobuf:"varint,3,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *Quota) Reset() { *m = Quota{} } +func (m *Quota) String() string { return proto.CompactTextString(m) } +func (*Quota) ProtoMessage() {} +func (*Quota) Descriptor() ([]byte, []int) { + return fileDescriptor_825b72046a6cedeb, []int{1} +} +func (m *Quota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Quota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Quota.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Quota) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quota.Merge(m, src) +} +func (m *Quota) XXX_Size() int { + return m.Size() +} +func (m *Quota) XXX_DiscardUnknown() { + xxx_messageInfo_Quota.DiscardUnknown(m) +} + +var xxx_messageInfo_Quota proto.InternalMessageInfo + +func (m *Quota) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +type Flow struct { + Inflow github_com_cosmos_cosmos_sdk_types.Int 
`protobuf:"bytes,1,opt,name=inflow,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"inflow"` + Outflow github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,2,opt,name=outflow,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"outflow"` + ChannelValue github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,3,opt,name=channel_value,json=channelValue,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"channel_value"` +} + +func (m *Flow) Reset() { *m = Flow{} } +func (m *Flow) String() string { return proto.CompactTextString(m) } +func (*Flow) ProtoMessage() {} +func (*Flow) Descriptor() ([]byte, []int) { + return fileDescriptor_825b72046a6cedeb, []int{2} +} +func (m *Flow) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Flow) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Flow.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Flow) XXX_Merge(src proto.Message) { + xxx_messageInfo_Flow.Merge(m, src) +} +func (m *Flow) XXX_Size() int { + return m.Size() +} +func (m *Flow) XXX_DiscardUnknown() { + xxx_messageInfo_Flow.DiscardUnknown(m) +} + +var xxx_messageInfo_Flow proto.InternalMessageInfo + +type RateLimit struct { + Path *Path `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Quota *Quota `protobuf:"bytes,2,opt,name=quota,proto3" json:"quota,omitempty"` + Flow *Flow `protobuf:"bytes,3,opt,name=flow,proto3" json:"flow,omitempty"` +} + +func (m *RateLimit) Reset() { *m = RateLimit{} } +func (m *RateLimit) String() string { return proto.CompactTextString(m) } +func (*RateLimit) ProtoMessage() {} +func (*RateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_825b72046a6cedeb, []int{3} +} +func (m *RateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_RateLimit.Merge(m, src) +} +func (m *RateLimit) XXX_Size() int { + return m.Size() +} +func (m *RateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_RateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_RateLimit proto.InternalMessageInfo + +func (m *RateLimit) GetPath() *Path { + if m != nil { + return m.Path + } + return nil +} + +func (m *RateLimit) GetQuota() *Quota { + if m != nil { + return m.Quota + } + return nil +} + +func (m *RateLimit) GetFlow() *Flow { + if m != nil { + return m.Flow + } + return nil +} + +type WhitelistedAddressPair struct { + Sender string `protobuf:"bytes,1,opt,name=sender,proto3" json:"sender,omitempty"` + Receiver string `protobuf:"bytes,2,opt,name=receiver,proto3" json:"receiver,omitempty"` +} + +func (m *WhitelistedAddressPair) Reset() { *m = WhitelistedAddressPair{} } +func (m *WhitelistedAddressPair) String() string { return proto.CompactTextString(m) } +func (*WhitelistedAddressPair) ProtoMessage() {} +func (*WhitelistedAddressPair) Descriptor() ([]byte, []int) { + return fileDescriptor_825b72046a6cedeb, []int{4} +} +func (m *WhitelistedAddressPair) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WhitelistedAddressPair) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WhitelistedAddressPair.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WhitelistedAddressPair) XXX_Merge(src proto.Message) { + xxx_messageInfo_WhitelistedAddressPair.Merge(m, src) +} +func (m *WhitelistedAddressPair) XXX_Size() int { + return m.Size() +} +func (m *WhitelistedAddressPair) XXX_DiscardUnknown() { + xxx_messageInfo_WhitelistedAddressPair.DiscardUnknown(m) +} + +var xxx_messageInfo_WhitelistedAddressPair proto.InternalMessageInfo + +func (m *WhitelistedAddressPair) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + +func (m *WhitelistedAddressPair) GetReceiver() string { + if m != nil { + return m.Receiver + } + return "" +} + +func init() { + proto.RegisterEnum("centauri.ratelimit.v1beta1.PacketDirection", PacketDirection_name, PacketDirection_value) + proto.RegisterType((*Path)(nil), "centauri.ratelimit.v1beta1.Path") + proto.RegisterType((*Quota)(nil), "centauri.ratelimit.v1beta1.Quota") + proto.RegisterType((*Flow)(nil), "centauri.ratelimit.v1beta1.Flow") + proto.RegisterType((*RateLimit)(nil), "centauri.ratelimit.v1beta1.RateLimit") + proto.RegisterType((*WhitelistedAddressPair)(nil), "centauri.ratelimit.v1beta1.WhitelistedAddressPair") +} + +func init() { + proto.RegisterFile("centauri/ratelimit/v1beta1/ratelimit.proto", fileDescriptor_825b72046a6cedeb) +} + +var fileDescriptor_825b72046a6cedeb = []byte{ + // 516 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xdd, 0x6a, 0x13, 0x41, + 0x14, 0xde, 0xb1, 0x69, 0x34, 0x27, 0xb6, 0x8d, 0x43, 0x29, 0x21, 0xe0, 0x36, 0x06, 0x94, 0x52, + 0x71, 0x43, 0xab, 0x20, 0xe2, 0x55, 0x7f, 0x52, 0x5a, 0x2c, 0x12, 0x37, 0x52, 0xc5, 0x9b, 0x30, + 0xd9, 0x39, 0x66, 0x87, 0x66, 0x77, 0xe2, 0xec, 0x6c, 0x1a, 0xdf, 0xc0, 0x4b, 0xdf, 0xc1, 0x17, + 0xf0, 0x31, 0x7a, 0xd9, 0x4b, 0xf1, 0xa2, 0x48, 0x72, 0xed, 0x3b, 0xc8, 0xcc, 0x6e, 0x6c, 0x11, + 0x54, 0xac, 0x57, 0xbb, 0xe7, 0x9b, 0xef, 0x7c, 0x33, 0xdf, 0xcc, 0x77, 0x60, 0x3d, 0xc0, 0x58, + 0xb3, 0x54, 0x89, 0xa6, 0x62, 0x1a, 0x07, 0x22, 0x12, 0xba, 0x39, 0xda, 0xe8, 0xa1, 0x66, 0x1b, + 0x17, 0x88, 0x37, 0x54, 0x52, 0x4b, 0x5a, 0x9b, 0x71, 0xbd, 0x8b, 0x95, 0x9c, 0x5b, 0x5b, 0xee, + 0xcb, 0xbe, 0xb4, 0xb4, 0xa6, 0xf9, 0xcb, 0x3a, 0x1a, 0x4f, 0xa1, 0xd0, 0x66, 0x3a, 0xa4, 0xcb, + 0x30, 0xcf, 0x31, 0x96, 0x51, 0x95, 0xd4, 0xc9, 0x5a, 0xc9, 0xcf, 0x0a, 0x7a, 0x1b, 0x20, 0x08, + 0x59, 0x1c, 0xe3, 0xa0, 0x2b, 0x78, 0xf5, 0x9a, 0x5d, 0x2a, 0xe5, 0xc8, 0x01, 0x6f, 0x4c, 0x08, + 0xcc, 0xbf, 0x48, 0xa5, 0x66, 0xf4, 0x35, 0x54, 0x22, 0x36, 0xee, 0x0e, 0x51, 0x99, 0x13, 0x74, + 0x13, 0x8c, 0x79, 0xa6, 0xb4, 0xed, 0x9d, 0x9e, 0xaf, 0x3a, 0x5f, 0xcf, 0x57, 0xef, 0xf5, 0x85, + 0x0e, 0xd3, 0x9e, 0x17, 0xc8, 0xa8, 0x19, 0xc8, 0x24, 0x92, 0x49, 0xfe, 0x79, 0x90, 0xf0, 0xe3, + 0xa6, 0x7e, 0x3f, 0xc4, 0xc4, 0x3b, 0x88, 0xb5, 0xbf, 0x18, 0xb1, 0x71, 0x3b, 0x93, 0xe9, 0x60, + 0xcc, 0x7f, 0x55, 0x56, 0x18, 0x8c, 0xb2, 0x83, 0xfc, 0x8f, 0xb2, 0x8f, 0xc1, 0x88, 0xde, 0x85, + 0x45, 0x9e, 0x2a, 0xa6, 0x85, 0x8c, 0xbb, 0xa1, 0x4c, 0x55, 0x52, 0x9d, 0xab, 0x93, 0xb5, 0x82, + 0xbf, 0x30, 0x43, 0xf7, 0x0d, 0xd8, 0xf8, 0x4e, 0xa0, 0xb0, 0x37, 0x90, 0x27, 0x74, 0x0f, 0x8a, + 0x22, 0x7e, 0x3b, 0x90, 0x27, 0x57, 0x74, 0x96, 0x77, 0xd3, 0x7d, 0xb8, 0x2e, 0x53, 0x6d, 0x85, + 0xae, 0x66, 0x64, 0xd6, 0x4e, 0x3b, 0xb0, 0x30, 0x7b, 0x9e, 
0x11, 0x1b, 0xa4, 0x68, 0x0d, 0xfc, + 0xbb, 0xde, 0xcd, 0x5c, 0xe4, 0xc8, 0x68, 0x34, 0x3e, 0x13, 0x28, 0xf9, 0x4c, 0xe3, 0xa1, 0x49, + 0x0f, 0x7d, 0x04, 0x85, 0x21, 0xd3, 0xa1, 0xb5, 0x5c, 0xde, 0xac, 0x7b, 0xbf, 0x0f, 0x98, 0x67, + 0x72, 0xe4, 0x5b, 0x36, 0x7d, 0x0c, 0xf3, 0xef, 0x4c, 0x2e, 0xac, 0xc1, 0xf2, 0xe6, 0x9d, 0x3f, + 0xb5, 0xd9, 0x00, 0xf9, 0x19, 0xdf, 0x6c, 0x67, 0x2f, 0x66, 0xee, 0xef, 0xdb, 0x99, 0x37, 0xf1, + 0x2d, 0xbb, 0x71, 0x08, 0x2b, 0xaf, 0x42, 0x61, 0x08, 0x89, 0x46, 0xbe, 0xc5, 0xb9, 0xc2, 0x24, + 0x69, 0x33, 0xa1, 0xe8, 0x0a, 0x14, 0x4d, 0x16, 0x51, 0xe5, 0xb9, 0xce, 0x2b, 0x5a, 0x83, 0x1b, + 0x0a, 0x03, 0x14, 0x23, 0x54, 0x79, 0xac, 0x7f, 0xd6, 0xeb, 0x4f, 0x60, 0xa9, 0xcd, 0x82, 0x63, + 0xd4, 0xbb, 0x42, 0x61, 0x60, 0x82, 0x40, 0x97, 0xa0, 0xdc, 0xde, 0xda, 0x79, 0xd6, 0x7a, 0xd9, + 0xed, 0xb4, 0x9e, 0xef, 0x56, 0x9c, 0x4b, 0x80, 0xdf, 0xda, 0x39, 0xaa, 0x90, 0x5a, 0xe1, 0xc3, + 0x27, 0xd7, 0xd9, 0xbe, 0x7f, 0x3a, 0x71, 0xc9, 0xd9, 0xc4, 0x25, 0xdf, 0x26, 0x2e, 0xf9, 0x38, + 0x75, 0x9d, 0xb3, 0xa9, 0xeb, 0x7c, 0x99, 0xba, 0xce, 0x9b, 0x5b, 0xe3, 0x4b, 0xe3, 0x6b, 0xaf, + 0xbe, 0x57, 0xb4, 0x13, 0xf8, 0xf0, 0x47, 0x00, 0x00, 0x00, 0xff, 0xff, 0xac, 0xf5, 0x58, 0x11, + 0xe1, 0x03, 0x00, 0x00, +} + +func (m *Path) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Path) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Path) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintRatelimit(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x12 + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintRatelimit(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Quota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Quota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Quota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintRatelimit(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x18 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Flow) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Flow) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Flow) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := 
m.ChannelValue.Size() + i -= size + if _, err := m.ChannelValue.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size := m.Outflow.Size() + i -= size + if _, err := m.Outflow.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size := m.Inflow.Size() + i -= size + if _, err := m.Inflow.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Flow != nil { + { + size, err := m.Flow.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Quota != nil { + { + size, err := m.Quota.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Path != nil { + { + size, err := m.Path.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRatelimit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WhitelistedAddressPair) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WhitelistedAddressPair) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WhitelistedAddressPair) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Receiver) > 0 { + i -= len(m.Receiver) + copy(dAtA[i:], m.Receiver) + i = encodeVarintRatelimit(dAtA, i, uint64(len(m.Receiver))) + i-- + dAtA[i] = 0x12 + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintRatelimit(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintRatelimit(dAtA []byte, offset int, v uint64) int { + offset -= sovRatelimit(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Path) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovRatelimit(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovRatelimit(uint64(l)) + } + return n +} + +func (m *Quota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.MaxPercentSend.Size() + n += 1 + l + sovRatelimit(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovRatelimit(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovRatelimit(uint64(m.DurationHours)) + } + return n +} + +func (m *Flow) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Inflow.Size() + n += 1 + l + 
sovRatelimit(uint64(l)) + l = m.Outflow.Size() + n += 1 + l + sovRatelimit(uint64(l)) + l = m.ChannelValue.Size() + n += 1 + l + sovRatelimit(uint64(l)) + return n +} + +func (m *RateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Path != nil { + l = m.Path.Size() + n += 1 + l + sovRatelimit(uint64(l)) + } + if m.Quota != nil { + l = m.Quota.Size() + n += 1 + l + sovRatelimit(uint64(l)) + } + if m.Flow != nil { + l = m.Flow.Size() + n += 1 + l + sovRatelimit(uint64(l)) + } + return n +} + +func (m *WhitelistedAddressPair) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovRatelimit(uint64(l)) + } + l = len(m.Receiver) + if l > 0 { + n += 1 + l + sovRatelimit(uint64(l)) + } + return n +} + +func sovRatelimit(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozRatelimit(x uint64) (n int) { + return sovRatelimit(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Path) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Path: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Path: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Quota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Flow) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Flow: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Flow: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Inflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Outflow", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Outflow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ChannelValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Path == nil { + m.Path = &Path{} + } + if err := m.Path.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quota", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quota == nil { + m.Quota = &Quota{} + } + if err := m.Quota.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flow", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flow == nil { + m.Flow = &Flow{} + } + if err := m.Flow.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WhitelistedAddressPair) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WhitelistedAddressPair: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WhitelistedAddressPair: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Receiver", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRatelimit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRatelimit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRatelimit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Receiver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipRatelimit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthRatelimit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipRatelimit(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRatelimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRatelimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowRatelimit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthRatelimit + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupRatelimit + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthRatelimit + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthRatelimit = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowRatelimit = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupRatelimit = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/ratelimit/types/tx.pb.go b/x/ratelimit/types/tx.pb.go new file mode 100644 index 000000000..cc47a6df0 --- /dev/null +++ b/x/ratelimit/types/tx.pb.go @@ -0,0 +1,2196 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: centauri/ratelimit/v1beta1/tx.proto + +package types + +import ( + context "context" + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/cosmos/cosmos-sdk/types/msgservice" + _ "github.com/cosmos/gogoproto/gogoproto" + grpc1 "github.com/cosmos/gogoproto/grpc" + proto "github.com/cosmos/gogoproto/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MsgAddRateLimit struct { + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty" yaml:"authority"` + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,3,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + MaxPercentSend github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,4,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_send"` + MaxPercentRecv github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,5,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_recv"` + DurationHours uint64 `protobuf:"varint,6,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *MsgAddRateLimit) Reset() { *m = MsgAddRateLimit{} } +func (m *MsgAddRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgAddRateLimit) ProtoMessage() {} +func (*MsgAddRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{0} +} +func (m *MsgAddRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgAddRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgAddRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgAddRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgAddRateLimit.Merge(m, src) +} +func (m *MsgAddRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgAddRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgAddRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgAddRateLimit proto.InternalMessageInfo + +func (m *MsgAddRateLimit) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgAddRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgAddRateLimit) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *MsgAddRateLimit) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +type MsgAddRateLimitResponse struct { +} + +func (m *MsgAddRateLimitResponse) Reset() { *m = MsgAddRateLimitResponse{} } +func (m *MsgAddRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgAddRateLimitResponse) ProtoMessage() {} +func (*MsgAddRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{1} +} +func (m *MsgAddRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgAddRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgAddRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgAddRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgAddRateLimitResponse.Merge(m, src) +} +func (m *MsgAddRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgAddRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgAddRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgAddRateLimitResponse proto.InternalMessageInfo + +type MsgUpdateRateLimit struct { + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty" yaml:"authority"` + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,3,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + MaxPercentSend github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,4,opt,name=max_percent_send,json=maxPercentSend,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_send"` + MaxPercentRecv github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,5,opt,name=max_percent_recv,json=maxPercentRecv,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"max_percent_recv"` + DurationHours uint64 `protobuf:"varint,6,opt,name=duration_hours,json=durationHours,proto3" json:"duration_hours,omitempty"` +} + +func (m *MsgUpdateRateLimit) Reset() { *m = MsgUpdateRateLimit{} } +func (m *MsgUpdateRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateRateLimit) ProtoMessage() {} +func (*MsgUpdateRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{2} +} +func (m *MsgUpdateRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateRateLimit.Merge(m, src) +} +func (m *MsgUpdateRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateRateLimit proto.InternalMessageInfo + +func (m *MsgUpdateRateLimit) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgUpdateRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgUpdateRateLimit) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +func (m *MsgUpdateRateLimit) GetDurationHours() uint64 { + if m != nil { + return m.DurationHours + } + return 0 +} + +type MsgUpdateRateLimitResponse struct { +} + +func (m *MsgUpdateRateLimitResponse) Reset() { *m = MsgUpdateRateLimitResponse{} } +func (m *MsgUpdateRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateRateLimitResponse) ProtoMessage() {} +func (*MsgUpdateRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{3} +} +func (m *MsgUpdateRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateRateLimitResponse) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateRateLimitResponse.Merge(m, src) +} +func (m *MsgUpdateRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateRateLimitResponse proto.InternalMessageInfo + +type MsgRemoveRateLimit struct { + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty" yaml:"authority"` + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,3,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *MsgRemoveRateLimit) Reset() { *m = MsgRemoveRateLimit{} } +func (m *MsgRemoveRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgRemoveRateLimit) ProtoMessage() {} +func (*MsgRemoveRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{4} +} +func (m *MsgRemoveRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRemoveRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRemoveRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRemoveRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRemoveRateLimit.Merge(m, src) +} +func (m *MsgRemoveRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgRemoveRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRemoveRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRemoveRateLimit proto.InternalMessageInfo + +func (m *MsgRemoveRateLimit) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgRemoveRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgRemoveRateLimit) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type MsgRemoveRateLimitResponse struct { +} + +func (m *MsgRemoveRateLimitResponse) Reset() { *m = MsgRemoveRateLimitResponse{} } +func (m *MsgRemoveRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgRemoveRateLimitResponse) ProtoMessage() {} +func (*MsgRemoveRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{5} +} +func (m *MsgRemoveRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgRemoveRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgRemoveRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgRemoveRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgRemoveRateLimitResponse.Merge(m, src) +} +func (m *MsgRemoveRateLimitResponse) XXX_Size() int { + 
return m.Size() +} +func (m *MsgRemoveRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgRemoveRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgRemoveRateLimitResponse proto.InternalMessageInfo + +type MsgResetRateLimit struct { + // authority is the address that controls the module (defaults to x/gov unless + // overwritten). + Authority string `protobuf:"bytes,1,opt,name=authority,proto3" json:"authority,omitempty" yaml:"authority"` + Denom string `protobuf:"bytes,2,opt,name=denom,proto3" json:"denom,omitempty"` + ChannelId string `protobuf:"bytes,3,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` +} + +func (m *MsgResetRateLimit) Reset() { *m = MsgResetRateLimit{} } +func (m *MsgResetRateLimit) String() string { return proto.CompactTextString(m) } +func (*MsgResetRateLimit) ProtoMessage() {} +func (*MsgResetRateLimit) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{6} +} +func (m *MsgResetRateLimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgResetRateLimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgResetRateLimit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgResetRateLimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgResetRateLimit.Merge(m, src) +} +func (m *MsgResetRateLimit) XXX_Size() int { + return m.Size() +} +func (m *MsgResetRateLimit) XXX_DiscardUnknown() { + xxx_messageInfo_MsgResetRateLimit.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgResetRateLimit proto.InternalMessageInfo + +func (m *MsgResetRateLimit) GetAuthority() string { + if m != nil { + return m.Authority + } + return "" +} + +func (m *MsgResetRateLimit) GetDenom() string { + if m != nil { + return m.Denom + } + return "" +} + +func (m *MsgResetRateLimit) GetChannelId() string { + if m != nil { + return m.ChannelId + } + return "" +} + +type MsgResetRateLimitResponse struct { +} + +func (m *MsgResetRateLimitResponse) Reset() { *m = MsgResetRateLimitResponse{} } +func (m *MsgResetRateLimitResponse) String() string { return proto.CompactTextString(m) } +func (*MsgResetRateLimitResponse) ProtoMessage() {} +func (*MsgResetRateLimitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_6b20911f56917b5f, []int{7} +} +func (m *MsgResetRateLimitResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgResetRateLimitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgResetRateLimitResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgResetRateLimitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgResetRateLimitResponse.Merge(m, src) +} +func (m *MsgResetRateLimitResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgResetRateLimitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgResetRateLimitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgResetRateLimitResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgAddRateLimit)(nil), "centauri.ratelimit.v1beta1.MsgAddRateLimit") + proto.RegisterType((*MsgAddRateLimitResponse)(nil), "centauri.ratelimit.v1beta1.MsgAddRateLimitResponse") + proto.RegisterType((*MsgUpdateRateLimit)(nil), 
"centauri.ratelimit.v1beta1.MsgUpdateRateLimit") + proto.RegisterType((*MsgUpdateRateLimitResponse)(nil), "centauri.ratelimit.v1beta1.MsgUpdateRateLimitResponse") + proto.RegisterType((*MsgRemoveRateLimit)(nil), "centauri.ratelimit.v1beta1.MsgRemoveRateLimit") + proto.RegisterType((*MsgRemoveRateLimitResponse)(nil), "centauri.ratelimit.v1beta1.MsgRemoveRateLimitResponse") + proto.RegisterType((*MsgResetRateLimit)(nil), "centauri.ratelimit.v1beta1.MsgResetRateLimit") + proto.RegisterType((*MsgResetRateLimitResponse)(nil), "centauri.ratelimit.v1beta1.MsgResetRateLimitResponse") +} + +func init() { + proto.RegisterFile("centauri/ratelimit/v1beta1/tx.proto", fileDescriptor_6b20911f56917b5f) +} + +var fileDescriptor_6b20911f56917b5f = []byte{ + // 534 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x95, 0xc1, 0x8a, 0xd3, 0x40, + 0x1c, 0xc6, 0x9b, 0xdd, 0xee, 0x42, 0x07, 0xac, 0xbb, 0x43, 0xb1, 0xd9, 0xa8, 0xe9, 0x12, 0x51, + 0x16, 0x97, 0x4d, 0xe8, 0x2e, 0x7a, 0xd8, 0xdb, 0xee, 0xc9, 0x05, 0x0b, 0x12, 0x15, 0xc4, 0x4b, + 0x99, 0x66, 0xc6, 0x34, 0xd8, 0xcc, 0x84, 0x99, 0x49, 0x48, 0x2f, 0x82, 0x5e, 0xbc, 0x28, 0xf8, + 0x28, 0x3e, 0xc6, 0x1e, 0xf7, 0x28, 0x1e, 0x8a, 0xb4, 0x88, 0x9e, 0x7d, 0x02, 0x49, 0xd2, 0xb4, + 0x6b, 0x42, 0x29, 0x45, 0x44, 0x0f, 0x9e, 0x26, 0x99, 0xff, 0x97, 0x6f, 0x7e, 0x33, 0xdf, 0x4c, + 0x06, 0xdc, 0x72, 0x08, 0x95, 0x28, 0xe4, 0x9e, 0xc5, 0x91, 0x24, 0x03, 0xcf, 0xf7, 0xa4, 0x15, + 0xb5, 0x7b, 0x44, 0xa2, 0xb6, 0x25, 0x63, 0x33, 0xe0, 0x4c, 0x32, 0xa8, 0xe5, 0x22, 0x73, 0x26, + 0x32, 0xa7, 0x22, 0xad, 0xe1, 0x32, 0x97, 0xa5, 0x32, 0x2b, 0x79, 0xca, 0xbe, 0xd0, 0x9a, 0x0e, + 0x13, 0x3e, 0x13, 0x96, 0x2f, 0x5c, 0x2b, 0x6a, 0x27, 0x4d, 0x56, 0x30, 0xbe, 0xae, 0x81, 0xab, + 0x1d, 0xe1, 0x9e, 0x60, 0x6c, 0x23, 0x49, 0x1e, 0x26, 0x56, 0xf0, 0x10, 0xd4, 0x50, 0x28, 0xfb, + 0x8c, 0x7b, 0x72, 0xa8, 0x2a, 0xbb, 0xca, 0x5e, 0xed, 0xb4, 0xf1, 0x63, 0xd4, 0xda, 0x1a, 0x22, + 0x7f, 0x70, 0x6c, 0xcc, 0x4a, 0x86, 0x3d, 0x97, 0xc1, 0x06, 0xd8, 0xc0, 0x84, 0x32, 0x5f, 0x5d, + 0x4b, 0xf4, 0x76, 0xf6, 0x02, 0x6f, 0x02, 0xe0, 0xf4, 0x11, 0xa5, 0x64, 0xd0, 0xf5, 0xb0, 0xba, + 0x9e, 0x96, 0x6a, 0xd3, 0x9e, 0x33, 0x0c, 0x9f, 0x81, 0x2d, 0x1f, 0xc5, 0xdd, 0x80, 0xf0, 0x64, + 0x42, 0x5d, 0x41, 0x28, 0x56, 0xab, 0xe9, 0x78, 0xe6, 0xf9, 0xa8, 0x55, 0xf9, 0x3c, 0x6a, 0xdd, + 0x71, 0x3d, 0xd9, 0x0f, 0x7b, 0xa6, 0xc3, 0x7c, 0x6b, 0x3a, 0x85, 0xac, 0x39, 0x10, 0xf8, 0xa5, + 0x25, 0x87, 0x01, 0x11, 0xe6, 0x19, 0x95, 0x76, 0xdd, 0x47, 0xf1, 0xa3, 0xcc, 0xe6, 0x31, 0xa1, + 0x25, 0x67, 0x4e, 0x9c, 0x48, 0xdd, 0xf8, 0x5d, 0x67, 0x9b, 0x38, 0x11, 0xbc, 0x0d, 0xea, 0x38, + 0xe4, 0x48, 0x7a, 0x8c, 0x76, 0xfb, 0x2c, 0xe4, 0x42, 0xdd, 0xdc, 0x55, 0xf6, 0xaa, 0xf6, 0x95, + 0xbc, 0xf7, 0x41, 0xd2, 0x79, 0x5c, 0x7f, 0xf3, 0xed, 0xe3, 0xdd, 0xf9, 0xfa, 0x18, 0x3b, 0xa0, + 0x59, 0x58, 0x66, 0x9b, 0x88, 0x80, 0x51, 0x41, 0x8c, 0xef, 0x6b, 0x00, 0x76, 0x84, 0xfb, 0x34, + 0xc0, 0x48, 0x92, 0xff, 0x29, 0xfc, 0xc9, 0x14, 0x6e, 0x00, 0xad, 0xbc, 0xd2, 0xb3, 0x20, 0xde, + 0x2b, 0x69, 0x10, 0x36, 0xf1, 0x59, 0xf4, 0x17, 0x82, 0x58, 0x40, 0x5b, 0xc0, 0x99, 0xd1, 0xbe, + 0x53, 0xc0, 0x76, 0x5a, 0x16, 0x44, 0xfe, 0x03, 0xb0, 0xd7, 0xc1, 0x4e, 0x89, 0x26, 0x67, 0x3d, + 0x7c, 0x5b, 0x05, 0xeb, 0x1d, 0xe1, 0xc2, 0x18, 0x34, 0x4e, 0x30, 0x7e, 0xc2, 0x11, 0x15, 0x2f, + 0x08, 0x9f, 0x53, 0xef, 0x9b, 0x8b, 0xff, 0x68, 0x66, 0xe1, 0xdc, 0x68, 0x47, 0x2b, 0x88, 0x73, + 0x02, 0xf8, 0x5a, 0x01, 0xcd, 0x2c, 0xf7, 0xf2, 0xe8, 0xe6, 0x12, 0xc3, 0xc2, 0x7e, 0xd1, 0xee, + 0xaf, 0xa6, 
0xff, 0x85, 0x21, 0x4b, 0x73, 0x75, 0x86, 0xc2, 0x2e, 0x58, 0xca, 0xb0, 0x60, 0xd7, + 0xc0, 0x57, 0xe0, 0x5a, 0x9a, 0x51, 0x99, 0xe0, 0x60, 0xa9, 0xe3, 0xe5, 0x68, 0xb5, 0x7b, 0x2b, + 0xc9, 0xf3, 0xf1, 0x4f, 0xf7, 0xcf, 0xc7, 0xba, 0x72, 0x31, 0xd6, 0x95, 0x2f, 0x63, 0x5d, 0xf9, + 0x30, 0xd1, 0x2b, 0x17, 0x13, 0xbd, 0xf2, 0x69, 0xa2, 0x57, 0x9e, 0x6f, 0xc7, 0x97, 0xae, 0xbc, + 0xf4, 0xe4, 0xf7, 0x36, 0xd3, 0x3b, 0xea, 0xe8, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x59, + 0x49, 0x80, 0x15, 0x07, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + AddTransferRateLimit(ctx context.Context, in *MsgAddRateLimit, opts ...grpc.CallOption) (*MsgAddRateLimitResponse, error) + UpdateTransferRateLimit(ctx context.Context, in *MsgUpdateRateLimit, opts ...grpc.CallOption) (*MsgUpdateRateLimitResponse, error) + RemoveTransferRateLimit(ctx context.Context, in *MsgRemoveRateLimit, opts ...grpc.CallOption) (*MsgRemoveRateLimitResponse, error) + ResetTransferRateLimit(ctx context.Context, in *MsgResetRateLimit, opts ...grpc.CallOption) (*MsgResetRateLimitResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) AddTransferRateLimit(ctx context.Context, in *MsgAddRateLimit, opts ...grpc.CallOption) (*MsgAddRateLimitResponse, error) { + out := new(MsgAddRateLimitResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Msg/AddTransferRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateTransferRateLimit(ctx context.Context, in *MsgUpdateRateLimit, opts ...grpc.CallOption) (*MsgUpdateRateLimitResponse, error) { + out := new(MsgUpdateRateLimitResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Msg/UpdateTransferRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) RemoveTransferRateLimit(ctx context.Context, in *MsgRemoveRateLimit, opts ...grpc.CallOption) (*MsgRemoveRateLimitResponse, error) { + out := new(MsgRemoveRateLimitResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Msg/RemoveTransferRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) ResetTransferRateLimit(ctx context.Context, in *MsgResetRateLimit, opts ...grpc.CallOption) (*MsgResetRateLimitResponse, error) { + out := new(MsgResetRateLimitResponse) + err := c.cc.Invoke(ctx, "/centauri.ratelimit.v1beta1.Msg/ResetTransferRateLimit", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. 
+type MsgServer interface { + AddTransferRateLimit(context.Context, *MsgAddRateLimit) (*MsgAddRateLimitResponse, error) + UpdateTransferRateLimit(context.Context, *MsgUpdateRateLimit) (*MsgUpdateRateLimitResponse, error) + RemoveTransferRateLimit(context.Context, *MsgRemoveRateLimit) (*MsgRemoveRateLimitResponse, error) + ResetTransferRateLimit(context.Context, *MsgResetRateLimit) (*MsgResetRateLimitResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) AddTransferRateLimit(ctx context.Context, req *MsgAddRateLimit) (*MsgAddRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddTransferRateLimit not implemented") +} +func (*UnimplementedMsgServer) UpdateTransferRateLimit(ctx context.Context, req *MsgUpdateRateLimit) (*MsgUpdateRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateTransferRateLimit not implemented") +} +func (*UnimplementedMsgServer) RemoveTransferRateLimit(ctx context.Context, req *MsgRemoveRateLimit) (*MsgRemoveRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method RemoveTransferRateLimit not implemented") +} +func (*UnimplementedMsgServer) ResetTransferRateLimit(ctx context.Context, req *MsgResetRateLimit) (*MsgResetRateLimitResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ResetTransferRateLimit not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_AddTransferRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgAddRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).AddTransferRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Msg/AddTransferRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).AddTransferRateLimit(ctx, req.(*MsgAddRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateTransferRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateTransferRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Msg/UpdateTransferRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateTransferRateLimit(ctx, req.(*MsgUpdateRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_RemoveTransferRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgRemoveRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).RemoveTransferRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Msg/RemoveTransferRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(MsgServer).RemoveTransferRateLimit(ctx, req.(*MsgRemoveRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_ResetTransferRateLimit_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgResetRateLimit) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).ResetTransferRateLimit(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/centauri.ratelimit.v1beta1.Msg/ResetTransferRateLimit", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).ResetTransferRateLimit(ctx, req.(*MsgResetRateLimit)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "centauri.ratelimit.v1beta1.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AddTransferRateLimit", + Handler: _Msg_AddTransferRateLimit_Handler, + }, + { + MethodName: "UpdateTransferRateLimit", + Handler: _Msg_UpdateTransferRateLimit_Handler, + }, + { + MethodName: "RemoveTransferRateLimit", + Handler: _Msg_RemoveTransferRateLimit_Handler, + }, + { + MethodName: "ResetTransferRateLimit", + Handler: _Msg_ResetTransferRateLimit_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "centauri/ratelimit/v1beta1/tx.proto", +} + +func (m *MsgAddRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgAddRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgAddRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x30 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgAddRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgAddRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgAddRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateRateLimit) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DurationHours != 0 { + i = encodeVarintTx(dAtA, i, uint64(m.DurationHours)) + i-- + dAtA[i] = 0x30 + } + { + size := m.MaxPercentRecv.Size() + i -= size + if _, err := m.MaxPercentRecv.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + { + size := m.MaxPercentSend.Size() + i -= size + if _, err := m.MaxPercentSend.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTx(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgRemoveRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRemoveRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRemoveRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgRemoveRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgRemoveRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgRemoveRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return 
len(dAtA) - i, nil +} + +func (m *MsgResetRateLimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgResetRateLimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgResetRateLimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ChannelId) > 0 { + i -= len(m.ChannelId) + copy(dAtA[i:], m.ChannelId) + i = encodeVarintTx(dAtA, i, uint64(len(m.ChannelId))) + i-- + dAtA[i] = 0x1a + } + if len(m.Denom) > 0 { + i -= len(m.Denom) + copy(dAtA[i:], m.Denom) + i = encodeVarintTx(dAtA, i, uint64(len(m.Denom))) + i-- + dAtA[i] = 0x12 + } + if len(m.Authority) > 0 { + i -= len(m.Authority) + copy(dAtA[i:], m.Authority) + i = encodeVarintTx(dAtA, i, uint64(len(m.Authority))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgResetRateLimitResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgResetRateLimitResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgResetRateLimitResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintTx(dAtA []byte, offset int, v uint64) int { + offset -= sovTx(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgAddRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.MaxPercentSend.Size() + n += 1 + l + sovTx(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovTx(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovTx(uint64(m.DurationHours)) + } + return n +} + +func (m *MsgAddRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = m.MaxPercentSend.Size() + n += 1 + l + sovTx(uint64(l)) + l = m.MaxPercentRecv.Size() + n += 1 + l + sovTx(uint64(l)) + if m.DurationHours != 0 { + n += 1 + sovTx(uint64(m.DurationHours)) + } + return n +} + +func (m *MsgUpdateRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgRemoveRateLimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgRemoveRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgResetRateLimit) 
Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Authority) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.Denom) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + l = len(m.ChannelId) + if l > 0 { + n += 1 + l + sovTx(uint64(l)) + } + return n +} + +func (m *MsgResetRateLimitResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovTx(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozTx(x uint64) (n int) { + return sovTx(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgAddRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgAddRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgAddRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgAddRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgAddRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgAddRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
MsgUpdateRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentSend", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentSend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxPercentRecv", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.MaxPercentRecv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DurationHours", wireType) + } + m.DurationHours = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DurationHours |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRemoveRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRemoveRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRemoveRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgRemoveRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgRemoveRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgRemoveRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgResetRateLimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgResetRateLimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgResetRateLimit: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Authority", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Authority = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Denom", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Denom = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTx + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTx + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgResetRateLimitResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTx + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgResetRateLimitResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgResetRateLimitResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTx(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTx + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTx(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTx + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { 
+				if shift >= 64 {
+					return 0, ErrIntOverflowTx
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthTx
+			}
+			iNdEx += length
+		case 3:
+			depth++
+		case 4:
+			if depth == 0 {
+				return 0, ErrUnexpectedEndOfGroupTx
+			}
+			depth--
+		case 5:
+			iNdEx += 4
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+		if iNdEx < 0 {
+			return 0, ErrInvalidLengthTx
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthTx        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowTx          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupTx = fmt.Errorf("proto: unexpected end of group")
+)
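Note on the generated service above: the Msg service registers four handlers (AddTransferRateLimit, UpdateTransferRateLimit, RemoveTransferRateLimit, ResetTransferRateLimit), and the add/update messages round-trip six fields: Authority, Denom, ChannelId, MaxPercentSend, MaxPercentRecv and DurationHours. A minimal client-side sketch of constructing one of these messages is below. It assumes the percent fields are cosmossdk.io/math Int values and that the authority is the gov module account; neither is confirmed by this diff, and the denom/channel values are placeholders.

// Sketch only: field names match the generated MsgAddRateLimit above, but the
// concrete type of MaxPercentSend/MaxPercentRecv (assumed sdkmath.Int here)
// and the expected authority address are assumptions, not confirmed by this diff.
package example

import (
	sdkmath "cosmossdk.io/math"

	ratelimittypes "github.com/notional-labs/centauri/v3/x/ratelimit/types"
)

// newAddRateLimitMsg is a hypothetical helper that fills every field the
// generated Marshal/Size/Unmarshal code above encodes (proto fields 1-6).
func newAddRateLimitMsg(authority string) *ratelimittypes.MsgAddRateLimit {
	return &ratelimittypes.MsgAddRateLimit{
		Authority:      authority,          // expected to be the gov module account (assumption)
		Denom:          "ppica",            // hypothetical rate-limited denom
		ChannelId:      "channel-0",        // hypothetical IBC channel
		MaxPercentSend: sdkmath.NewInt(10), // cap outflows at 10% of channel value per window
		MaxPercentRecv: sdkmath.NewInt(10), // cap inflows at 10% per window
		DurationHours:  24,                 // quota window length, in hours
	}
}

If the percent fields turn out to be a Dec or another custom type rather than Int, only the two NewInt calls change; the field layout itself is fixed by the wire tags the Marshal functions above write (0xa, 0x12, 0x1a, 0x22, 0x2a, 0x30).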