diff --git a/app/app.go b/app/app.go index f84763c36..1d6808630 100644 --- a/app/app.go +++ b/app/app.go @@ -204,6 +204,10 @@ import ( // Force-load the tracer engines to trigger registration due to Go-Ethereum v1.10.15 changes _ "github.com/ethereum/go-ethereum/eth/tracers/js" _ "github.com/ethereum/go-ethereum/eth/tracers/native" + + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator" + coordinatorkeeper "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/keeper" + coordinatortypes "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" ) // Name defines the application binary name @@ -274,6 +278,7 @@ var ( avs.AppModuleBasic{}, oracle.AppModuleBasic{}, distr.AppModuleBasic{}, + coordinator.AppModuleBasic{}, ) // module account permissions @@ -351,15 +356,16 @@ type ExocoreApp struct { EpochsKeeper epochskeeper.Keeper // exocore assets module keepers - AssetsKeeper assetsKeeper.Keeper - DelegationKeeper delegationKeeper.Keeper - RewardKeeper rewardKeeper.Keeper - OperatorKeeper operatorKeeper.Keeper - ExoSlashKeeper slashKeeper.Keeper - AVSManagerKeeper avsManagerKeeper.Keeper - OracleKeeper oracleKeeper.Keeper - ExomintKeeper exomintkeeper.Keeper - DistrKeeper distrkeeper.Keeper + AssetsKeeper assetsKeeper.Keeper + DelegationKeeper delegationKeeper.Keeper + RewardKeeper rewardKeeper.Keeper + OperatorKeeper operatorKeeper.Keeper + ExoSlashKeeper slashKeeper.Keeper + AVSManagerKeeper avsManagerKeeper.Keeper + OracleKeeper oracleKeeper.Keeper + ExomintKeeper exomintkeeper.Keeper + DistrKeeper distrkeeper.Keeper + CoordinatorKeeper coordinatorkeeper.Keeper // the module manager mm *module.Manager @@ -445,6 +451,7 @@ func NewExocoreApp( oracleTypes.StoreKey, exominttypes.StoreKey, distrtypes.StoreKey, + coordinatortypes.StoreKey, ) tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey, evmtypes.TransientKey, feemarkettypes.TransientKey) @@ -621,6 +628,7 @@ func NewExocoreApp( scopedIBCKeeper := app.CapabilityKeeper.ScopeToModule(ibcexported.ModuleName) scopedTransferKeeper := app.CapabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName) scopedICAHostKeeper := app.CapabilityKeeper.ScopeToModule(icahosttypes.SubModuleName) + scopedIBCProviderKeeper := app.CapabilityKeeper.ScopeToModule(coordinatortypes.ModuleName) // Applications that wish to enforce statically created ScopedKeepers should call `Seal` // after creating their scoped modules in `NewApp` with `ScopeToModule` app.CapabilityKeeper.Seal() @@ -779,30 +787,70 @@ func NewExocoreApp( transferStack = transfer.NewIBCModule(app.TransferKeeper) transferStack = erc20.NewIBCMiddleware(app.Erc20Keeper, transferStack) + // we are the x/appchain coordinator chain, so we add the keeper, but + // it's added after IBC to allow communication between chains. + // however, it is added before the IBC router is set (and sealed), so that + // it can handle messages from other chains. 
+	app.CoordinatorKeeper = coordinatorkeeper.NewKeeper(
+		appCodec, keys[coordinatortypes.StoreKey],
+		app.AVSManagerKeeper, app.EpochsKeeper, app.OperatorKeeper,
+		app.StakingKeeper, app.DelegationKeeper,
+		app.IBCKeeper.ClientKeeper, &app.IBCKeeper.PortKeeper,
+		scopedIBCProviderKeeper, app.IBCKeeper.ChannelKeeper,
+		app.IBCKeeper.ConnectionKeeper, app.AccountKeeper,
+	)
+
+	coordinatorModule := coordinator.NewAppModule(
+		appCodec,
+		app.CoordinatorKeeper,
+	)
+	coordinatorIbcModule := coordinator.NewIBCModule(app.CoordinatorKeeper)
+
 	// Create static IBC router, add transfer route, then set and seal it
 	ibcRouter := porttypes.NewRouter()
 	ibcRouter.
 		AddRoute(icahosttypes.SubModuleName, icaHostIBCModule).
-		AddRoute(ibctransfertypes.ModuleName, transferStack)
+		AddRoute(ibctransfertypes.ModuleName, transferStack).
+		AddRoute(coordinatortypes.ModuleName, coordinatorIbcModule)
 	app.IBCKeeper.SetRouter(ibcRouter)
 
 	// set the hooks at the end, after all modules are instantiated.
 	(&app.OperatorKeeper).SetHooks(
-		app.StakingKeeper.OperatorHooks(),
+		operatorTypes.NewMultiOperatorHooks(
+			// the order is not super relevant because these functions are independent
+			app.StakingKeeper.OperatorHooks(),
+			app.CoordinatorKeeper.OperatorHooks(),
+		),
 	)
 	(&app.DelegationKeeper).SetHooks(
-		app.StakingKeeper.DelegationHooks(),
+		delegationTypes.NewMultiDelegationHooks(
+			// the order is not super relevant because these functions are independent
+			app.StakingKeeper.DelegationHooks(),
+			app.CoordinatorKeeper.DelegationHooks(),
+		),
 	)
 	(&app.EpochsKeeper).SetHooks(
 		epochstypes.NewMultiEpochHooks(
-			app.DistrKeeper.EpochsHooks(), // come first for using the voting power of last epoch
-			app.OperatorKeeper.EpochsHooks(), // must come before staking keeper so it can set the USD value
-			app.StakingKeeper.EpochsHooks(), // at this point, the order is irrelevant.
- app.ExomintKeeper.EpochsHooks(), // however, this may change once we have distribution - app.AVSManagerKeeper.EpochsHooks(), // no-op for now + app.DistrKeeper.EpochsHooks(), // come first for using the voting power of last epoch + app.OperatorKeeper.EpochsHooks(), // must come before staking keeper so it can set the USD value + app.ExomintKeeper.EpochsHooks(), // must happen after distribution but not relevant otherwise + app.StakingKeeper.EpochsHooks(), // after operator == good + app.AVSManagerKeeper.EpochsHooks(), // after operator == good + app.CoordinatorKeeper.EpochsHooks(), // after operator == good ), ) @@ -901,6 +949,7 @@ func NewExocoreApp( avs.NewAppModule(appCodec, app.AVSManagerKeeper), oracle.NewAppModule(appCodec, app.OracleKeeper, app.AccountKeeper, app.BankKeeper), distr.NewAppModule(appCodec, app.DistrKeeper), + coordinatorModule, ) // During begin block slashing happens after reward.BeginBlocker so that @@ -939,6 +988,7 @@ func NewExocoreApp( avsManagerTypes.ModuleName, oracleTypes.ModuleName, distrtypes.ModuleName, + coordinatortypes.ModuleName, ) app.mm.SetOrderEndBlockers( @@ -973,6 +1023,7 @@ func NewExocoreApp( exoslashTypes.ModuleName, avsManagerTypes.ModuleName, distrtypes.ModuleName, + coordinatortypes.ModuleName, // op module feemarkettypes.ModuleName, // last in order to retrieve the block gas used ) @@ -1015,6 +1066,7 @@ func NewExocoreApp( rewardTypes.ModuleName, // not fully implemented yet exoslashTypes.ModuleName, // not fully implemented yet distrtypes.ModuleName, + coordinatortypes.ModuleName, // not fully implemented yet // must be the last module after others have been set up, so that it can check // the invariants (if configured to do so). crisistypes.ModuleName, diff --git a/precompiles/avs/avs_test.go b/precompiles/avs/avs_test.go index ca8b66f14..a587566cf 100644 --- a/precompiles/avs/avs_test.go +++ b/precompiles/avs/avs_test.go @@ -1,12 +1,13 @@ package avs_test import ( + "math/big" + "time" + "cosmossdk.io/math" assetstypes "github.com/ExocoreNetwork/exocore/x/assets/types" avskeeper "github.com/ExocoreNetwork/exocore/x/avs/keeper" "github.com/ExocoreNetwork/exocore/x/avs/types" - "math/big" - "time" sdkmath "cosmossdk.io/math" operatorKeeper "github.com/ExocoreNetwork/exocore/x/operator/keeper" diff --git a/proto/exocore/appchain/common/v1/common.proto b/proto/exocore/appchain/common/v1/common.proto index d3ff2ff90..7dfd11be5 100644 --- a/proto/exocore/appchain/common/v1/common.proto +++ b/proto/exocore/appchain/common/v1/common.proto @@ -3,7 +3,9 @@ syntax = "proto3"; package exocore.appchain.common.v1; import "amino/amino.proto"; +import "cosmos_proto/cosmos.proto"; import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; import "google/protobuf/duration.proto"; import "ibc/lightclients/tendermint/v1/tendermint.proto"; import "tendermint/abci/types.proto"; @@ -26,7 +28,7 @@ message SubscriberParams { // the rewards from the subscriber to the coordinator. It is used in the event // that a channel between coordinator and subscriber exists prior to the // provision of security from Exocore to the appchain. Until a changeover - // process is implemented, it is currently unused. (TODO). The advantage + // process is implemented, it is currently unused (TODO). The advantage // of reusing a channel that was already in place is that the coin denomination // which contains a hash of the channel name will remain unchanged. 
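The stability point above is the standard ICS-20 behaviour: a voucher denomination embeds a hash of the port/channel trace, so it only stays the same if the original channel is reused. A minimal sketch of that derivation, assuming ibc-go v7's transfer types and a purely illustrative channel and base denom:

```go
package main

import (
	"fmt"

	transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types"
)

func main() {
	// "transfer/channel-1/uexo" is purely illustrative; the trace is
	// port/channel/baseDenom as seen by the receiving chain.
	trace := transfertypes.ParseDenomTrace("transfer/channel-1/uexo")
	// IBCDenom() returns "ibc/" + uppercase hex SHA-256 of the trace path,
	// which is why switching to a new channel would change the denom.
	fmt.Println(trace.IBCDenom())
}
```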
   string distribution_transmission_channel = 2;
@@ -101,7 +103,24 @@ message CoordinatorInfo {
   ibc.lightclients.tendermint.v1.ClientState client_state = 1;
   // consensus_state is the consensus state of the coordinator chain.
   ibc.lightclients.tendermint.v1.ConsensusState consensus_state = 2;
-  // initial_val_set is the initial validator set of the coordinator chain.
+  // initial_val_set is the initial validator set of the subscriber chain.
   repeated .tendermint.abci.ValidatorUpdate initial_val_set = 3
       [ (gogoproto.nullable) = false ];
+}
+
+// SubscriberValidator is a validator structure on the subscriber chain. It is stored
+// within the subscriber module, indexed by a prefix + consensus address, and
+// within the coordinator module, indexed by a prefix + chain id + consensus address.
+message SubscriberValidator {
+  // address, as derived from the consensus key. No correlation with the operator
+  // address on Exocore.
+  bytes cons_address = 1;
+  // power is the vote power of the validator
+  int64 power = 2;
+  // pubkey is the consensus public key of the validator, as a Protobuf Any.
+  // this type is chosen to match the x/staking/validator type.
+  google.protobuf.Any pubkey = 3 [
+    (cosmos_proto.accepts_interface) = "cosmos.crypto.PubKey",
+    (gogoproto.moretags) = "yaml:\"consensus_pubkey\""
+  ];
+}
\ No newline at end of file
diff --git a/proto/exocore/appchain/common/v1/wire.proto b/proto/exocore/appchain/common/v1/wire.proto
new file mode 100644
index 000000000..a0fe46890
--- /dev/null
+++ b/proto/exocore/appchain/common/v1/wire.proto
@@ -0,0 +1,87 @@
+syntax = "proto3";
+
+package exocore.appchain.common.v1;
+
+import "cosmos/staking/v1beta1/staking.proto";
+import "gogoproto/gogo.proto";
+import "tendermint/abci/types.proto";
+
+option go_package = "github.com/ExocoreNetwork/exocore/x/appchain/common/types";
+
+// This file contains all of the information that is sent over the wire by either
+// the coordinator or each of the subscribers.
+
+message HandshakeMetadata {
+  // coordinator_fee_pool_addr is the address on the coordinator to which the
+  // subscriber chain will send the fees proportionally and periodically.
+  string coordinator_fee_pool_addr = 1;
+  // version is the version of the appchain protocol
+  string version = 2;
+}
+
+// SlashPacketData is sent from the subscriber chain to the coordinator chain
+// to request the slashing of a validator as a result of an infraction committed
+// on the subscriber chain.
+message SlashPacketData {
+  // validator is the validator to be slashed
+  tendermint.abci.Validator validator = 1 [
+    (gogoproto.nullable) = false,
+    (gogoproto.moretags) = "yaml:\"validator\""
+  ];
+  // valset_update_id is the id of the validator set change during which
+  // the infraction was committed
+  uint64 valset_update_id = 2 [ (gogoproto.customname) = "ValsetUpdateID" ];
+  // infraction refers to the type of infraction committed
+  cosmos.staking.v1beta1.Infraction infraction = 3;
+}
+
+// VscMaturedPacketData is sent from the subscriber chain to the coordinator chain
+// to indicate that a VSC has matured and unbondings associated with that VSC
+// can now be released.
+message VscMaturedPacketData {
+  // valset_update_id is the id of the validator set change to mature.
+  uint64 valset_update_id = 1 [ (gogoproto.customname) = "ValsetUpdateID" ];
+}
+
+// SubscriberPacketDataType is the enum used to identify the type of packet sent.
+enum SubscriberPacketDataType { + option (gogoproto.goproto_enum_prefix) = false; + // SUBSCRIBER_PACKET_DATA_TYPE_UNSPECIFIED is the default value + SUBSCRIBER_PACKET_DATA_TYPE_UNSPECIFIED = 0 [ (gogoproto.enumvalue_customname) = "UnspecifiedPacket" ]; + // SUBSCRIBER_PACKET_DATA_TYPE_SLASH is the type of packet sent when a subscriber + // chain wants to request the slashing of a validator on the coordinator chain. + SUBSCRIBER_PACKET_DATA_TYPE_SLASH = 1 [ (gogoproto.enumvalue_customname) = "SlashPacket" ]; + // SUBSCRIBER_PACKET_DATA_TYPE_VSC_MATURED is the type of packet sent when a subscriber + // chain wants to indicate that a VSC has matured and unbondings associated with + // that VSC can now be released. + SUBSCRIBER_PACKET_DATA_TYPE_VSC_MATURED = 2 [ (gogoproto.enumvalue_customname) = "VscMaturedPacket" ]; +} + +// SubscriberPacketData is a wrapped message that contains the type of packet +// and the data associated with that packet. +message SubscriberPacketData { + // type is the type of packet sent + SubscriberPacketDataType type = 1; + // data is the data associated with the packet + oneof data { + // slash_packet_data is the data associated with a SlashPacket + SlashPacketData slash_packet_data = 2; + // vsc_matured_packet_data is the data associated with a VscMaturedPacket + VscMaturedPacketData vsc_matured_packet_data = 3; + } +} + +// ValidatorSetChangePacketData is sent from the coordinator chain to the subscriber chain +// containing the new validator set and the id of the validator set change. +message ValidatorSetChangePacketData { + // validator_updates is the edits to the existing validator set + repeated .tendermint.abci.ValidatorUpdate validator_updates = 1 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"validator_updates\"" + ]; + // valset_update_id is the id of the validator set change + uint64 valset_update_id = 2 [(gogoproto.customname) = "ValsetUpdateID"]; + // slash_acks is the list of consensus addresses slashed on the coordinator chain, + // in response to such requests from the subscriber chain. + repeated bytes slash_acks = 3; +} \ No newline at end of file diff --git a/proto/exocore/appchain/coordinator/v1/coordinator.proto b/proto/exocore/appchain/coordinator/v1/coordinator.proto index feff10b5f..65d100d0e 100644 --- a/proto/exocore/appchain/coordinator/v1/coordinator.proto +++ b/proto/exocore/appchain/coordinator/v1/coordinator.proto @@ -2,6 +2,7 @@ syntax = "proto3"; package exocore.appchain.coordinator.v1; +import "exocore/appchain/common/v1/wire.proto"; import "exocore/appchain/coordinator/v1/tx.proto"; import "gogoproto/gogo.proto"; @@ -21,3 +22,21 @@ message ChainIDs { repeated string list = 1; } +// ConsensusAddresses is a list of consensus addresses. +message ConsensusAddresses { + // list is the list of consensus addresses. + repeated bytes list = 1; +} + +// ValidatorSetChangePackets is a helper structure to store a list of packets +message ValidatorSetChangePackets { + // list is the list of packets to be sent to the subscriber chain. + repeated .exocore.appchain.common.v1.ValidatorSetChangePacketData list = 1 + [(gogoproto.nullable) = false]; +} + +// UndelegationRecordKeys is a collection of undelegation record keys. +message UndelegationRecordKeys { + // list is the list of undelegation record keys. 
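For the wrapped packet type defined in wire.proto above, the subscriber side would build the concrete payload, wrap it in the oneof, and only then encode it into the IBC packet bytes. A minimal sketch, assuming the standard gogoproto-generated Go types in x/appchain/common/types (oneof wrapper names follow the `<Message>_<FieldName>` convention) and illustrative values; the on-wire encoding (proto vs. JSON) is whatever the module's own GetBytes-style helper settles on:

```go
package main

import (
	"fmt"

	abci "github.com/cometbft/cometbft/abci/types"
	stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
	"github.com/cosmos/gogoproto/proto"

	commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types"
)

func main() {
	// build the slash request for a downtime infraction observed on the
	// subscriber; the address and vsc id below are illustrative only
	slash := commontypes.SlashPacketData{
		Validator:      abci.Validator{Address: []byte("cons-addr"), Power: 100},
		ValsetUpdateID: 7,
		Infraction:     stakingtypes.Infraction_INFRACTION_DOWNTIME,
	}
	// wrap it so the coordinator can switch on the packet type
	wrapped := commontypes.SubscriberPacketData{
		Type: commontypes.SlashPacket,
		Data: &commontypes.SubscriberPacketData_SlashPacketData{
			SlashPacketData: &slash,
		},
	}
	// proto.Marshal is used here for illustration; the bytes would then be
	// handed to ChannelKeeper.SendPacket on the subscriber's end of the channel
	bz, err := proto.Marshal(&wrapped)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d packet bytes\n", len(bz))
}
```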
+ repeated bytes list = 1; +} diff --git a/proto/exocore/appchain/subscriber/v1/genesis.proto b/proto/exocore/appchain/subscriber/v1/genesis.proto index 2df7a1654..af0bc9a7c 100644 --- a/proto/exocore/appchain/subscriber/v1/genesis.proto +++ b/proto/exocore/appchain/subscriber/v1/genesis.proto @@ -4,11 +4,35 @@ package exocore.appchain.subscriber.v1; import "exocore/appchain/common/v1/common.proto"; import "gogoproto/gogo.proto"; +import "google/protobuf/timestamp.proto"; option go_package = "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types"; // GenesisState is the genesis state for the appchain subscriber module. message GenesisState { - // Params is the parameters for the appchain subscriber module. + // The first two fields are word-for-word pulled from `common.proto`, to be + // filled by the coordinator module (or an export). + // params is the parameters for the appchain subscriber module. exocore.appchain.common.v1.SubscriberParams params = 1 [(gogoproto.nullable) = false]; + // coordinator is the coordinator information for the subscriber. + exocore.appchain.common.v1.CoordinatorInfo coordinator = 2 [ (gogoproto.nullable) = false ]; + // Below are the IBC parameters + // coordinator_client_id is the client id of the coordinator chain. + string coordinator_client_id = 3 [ (gogoproto.customname) = "CoordinatorClientID" ]; + // coordinator_channel_id is the channel id of the coordinator chain. + string coordinator_channel_id = 4 [ (gogoproto.customname) = "CoordinatorChannelID" ]; + // operational parameters that are to be exported can go here. } + +// MaturingVSCPacket represents a vsc packet that is maturing internal to the +// subscriber module, where it has not yet relayed a VSCMatured packet back. +// While it is technically feasible to store this just as a key in the state, +// keeping it as a separate type allows exporting the genesis data. +// The key used is prefix + time + vscId. +message MaturingVSCPacket { + // vsc_id is the id of the VSC that is maturing. + uint64 vsc_id = 1 [ (gogoproto.customname) = "ValidatorSetChangeID" ]; + // maturity_time is the time at which the VSC will mature. 
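The "prefix + time + vscId" key layout described above is what lets the subscriber iterate maturing packets in time order with a single prefix scan. A minimal sketch of how such a key could be assembled, assuming a hypothetical prefix byte and reusing the AppendMany helper added to utils/utils.go later in this diff:

```go
package main

import (
	"fmt"
	"time"

	sdk "github.com/cosmos/cosmos-sdk/types"

	"github.com/ExocoreNetwork/exocore/utils"
)

// MaturingVSCPacketKeyPrefix is a placeholder value; the real prefix byte
// would live in the subscriber module's keys file.
const MaturingVSCPacketKeyPrefix = byte(0x05)

// maturingVSCPacketKey builds prefix | sortable time bytes | big-endian vscID,
// so iteration over the prefix yields packets ordered by maturity time.
func maturingVSCPacketKey(maturityTime time.Time, vscID uint64) []byte {
	return utils.AppendMany(
		[]byte{MaturingVSCPacketKeyPrefix},
		sdk.FormatTimeBytes(maturityTime),
		sdk.Uint64ToBigEndian(vscID),
	)
}

func main() {
	key := maturingVSCPacketKey(time.Now().Add(3*7*24*time.Hour), 12)
	fmt.Printf("%x\n", key)
}
```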
+ google.protobuf.Timestamp maturity_time = 2 + [ (gogoproto.stdtime) = true, (gogoproto.nullable) = false ]; +} \ No newline at end of file diff --git a/testutil/keeper/coordinator.go b/testutil/keeper/coordinator.go new file mode 100644 index 000000000..a1d92d01b --- /dev/null +++ b/testutil/keeper/coordinator.go @@ -0,0 +1,93 @@ +package keeper + +import ( + "testing" + + tmdb "github.com/cometbft/cometbft-db" + "github.com/cometbft/cometbft/libs/log" + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/store" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/keeper" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" +) + +// MockedKeepers contains all the mocked keepers +type MockedKeepers struct { + AVSKeeper *types.MockAVSKeeper + EpochsKeeper *types.MockEpochsKeeper + StakingKeeper *types.MockStakingKeeper + OperatorKeeper *types.MockOperatorKeeper + DelegationKeeper *types.MockDelegationKeeper + ClientKeeper *commontypes.MockClientKeeper + PortKeeper *commontypes.MockPortKeeper + ScopedKeeper *commontypes.MockScopedKeeper + ChannelKeeper *commontypes.MockChannelKeeper + ConnectionKeeper *commontypes.MockConnectionKeeper + AccountKeeper *commontypes.MockAccountKeeper +} + +func NewCoordinatorKeeper(t testing.TB) (keeper.Keeper, sdk.Context, MockedKeepers) { + storeKey := sdk.NewKVStoreKey(types.StoreKey) + memStoreKey := storetypes.NewMemoryStoreKey(types.MemStoreKey) + + db := tmdb.NewMemDB() + stateStore := store.NewCommitMultiStore(db) + stateStore.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(memStoreKey, storetypes.StoreTypeMemory, nil) + require.NoError(t, stateStore.LoadLatestVersion()) + + registry := codectypes.NewInterfaceRegistry() + cryptocodec.RegisterInterfaces(registry) + cdc := codec.NewProtoCodec(registry) + + ctx := sdk.NewContext(stateStore, cmtproto.Header{}, false, log.TestingLogger()) + + // Create mock controller + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + // Create mock keepers + mockedKeepers := MockedKeepers{ + AVSKeeper: types.NewMockAVSKeeper(ctrl), + EpochsKeeper: types.NewMockEpochsKeeper(ctrl), + StakingKeeper: types.NewMockStakingKeeper(ctrl), + OperatorKeeper: types.NewMockOperatorKeeper(ctrl), + DelegationKeeper: types.NewMockDelegationKeeper(ctrl), + ClientKeeper: commontypes.NewMockClientKeeper(ctrl), + PortKeeper: commontypes.NewMockPortKeeper(ctrl), + ScopedKeeper: commontypes.NewMockScopedKeeper(ctrl), + ChannelKeeper: commontypes.NewMockChannelKeeper(ctrl), + ConnectionKeeper: commontypes.NewMockConnectionKeeper(ctrl), + AccountKeeper: commontypes.NewMockAccountKeeper(ctrl), + } + + k := keeper.NewKeeper( + cdc, + storeKey, + mockedKeepers.AVSKeeper, + mockedKeepers.EpochsKeeper, + mockedKeepers.OperatorKeeper, + mockedKeepers.StakingKeeper, + mockedKeepers.DelegationKeeper, + mockedKeepers.ClientKeeper, + mockedKeepers.PortKeeper, + mockedKeepers.ScopedKeeper, + mockedKeepers.ChannelKeeper, + mockedKeepers.ConnectionKeeper, + mockedKeepers.AccountKeeper, + ) + + // Initialize params + k.SetParams(ctx, types.DefaultParams()) + 
+ return k, ctx, mockedKeepers +} diff --git a/testutil/keeper/subscriber.go b/testutil/keeper/subscriber.go new file mode 100644 index 000000000..562697c21 --- /dev/null +++ b/testutil/keeper/subscriber.go @@ -0,0 +1,88 @@ +package keeper + +import ( + "testing" + + tmdb "github.com/cometbft/cometbft-db" + "github.com/cometbft/cometbft/libs/log" + cmtproto "github.com/cometbft/cometbft/proto/tendermint/types" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/store" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/keeper" + "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" +) + +// SubscriberMockedKeepers contains all the mocked keepers +type SubscriberMockedKeepers struct { + AccountKeeper *commontypes.MockAccountKeeper + BankKeeper *commontypes.MockBankKeeper + ScopedKeeper *commontypes.MockScopedKeeper + PortKeeper *commontypes.MockPortKeeper + ClientKeeper *commontypes.MockClientKeeper + ConnectionKeeper *commontypes.MockConnectionKeeper + ChannelKeeper *commontypes.MockChannelKeeper + IBCCoreKeeper *commontypes.MockIBCCoreKeeper + IBCTransferKeeper *commontypes.MockIBCTransferKeeper +} + +func NewSubscriberKeeper(t testing.TB) (keeper.Keeper, sdk.Context, SubscriberMockedKeepers) { + storeKey := sdk.NewKVStoreKey(types.StoreKey) + memStoreKey := storetypes.NewMemoryStoreKey(types.MemStoreKey) + + db := tmdb.NewMemDB() + stateStore := store.NewCommitMultiStore(db) + stateStore.MountStoreWithDB(storeKey, storetypes.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(memStoreKey, storetypes.StoreTypeMemory, nil) + require.NoError(t, stateStore.LoadLatestVersion()) + + registry := codectypes.NewInterfaceRegistry() + cryptocodec.RegisterInterfaces(registry) + cdc := codec.NewProtoCodec(registry) + + ctx := sdk.NewContext(stateStore, cmtproto.Header{}, false, log.TestingLogger()) + + // Create mock controller + ctrl := gomock.NewController(t) + t.Cleanup(ctrl.Finish) + + // Create mock keepers + mockedKeepers := SubscriberMockedKeepers{ + AccountKeeper: commontypes.NewMockAccountKeeper(ctrl), + BankKeeper: commontypes.NewMockBankKeeper(ctrl), + ScopedKeeper: commontypes.NewMockScopedKeeper(ctrl), + PortKeeper: commontypes.NewMockPortKeeper(ctrl), + ClientKeeper: commontypes.NewMockClientKeeper(ctrl), + ConnectionKeeper: commontypes.NewMockConnectionKeeper(ctrl), + ChannelKeeper: commontypes.NewMockChannelKeeper(ctrl), + IBCCoreKeeper: commontypes.NewMockIBCCoreKeeper(ctrl), + IBCTransferKeeper: commontypes.NewMockIBCTransferKeeper(ctrl), + } + + k := keeper.NewKeeper( + cdc, + storeKey, + mockedKeepers.AccountKeeper, + mockedKeepers.BankKeeper, + mockedKeepers.ScopedKeeper, + mockedKeepers.PortKeeper, + mockedKeepers.ClientKeeper, + mockedKeepers.ConnectionKeeper, + mockedKeepers.ChannelKeeper, + mockedKeepers.IBCCoreKeeper, + mockedKeepers.IBCTransferKeeper, + "fee_collector", // feeCollectorName + ) + + // Initialize params if needed + // k.SetParams(ctx, types.DefaultParams()) + + return k, ctx, mockedKeepers +} diff --git a/testutil/tx/signer.go b/testutil/tx/signer.go index f966d2d9d..616d2af2f 100644 --- a/testutil/tx/signer.go +++ b/testutil/tx/signer.go @@ -42,6 +42,16 @@ func 
GenerateAddress() common.Address { return addr } +func GenerateAccAddress() sdk.AccAddress { + addr, _ := NewAccAddressAndKey() + return addr +} + +func GenerateConsAddress() sdk.ConsAddress { + addr, _ := NewConsAddressAndKey() + return addr +} + // GenerateConsensusKey generates a consensus key. func GenerateConsensusKey() keytypes.WrappedConsKey { privVal := mock.NewPV() @@ -52,6 +62,15 @@ func GenerateConsensusKey() keytypes.WrappedConsKey { return keytypes.NewWrappedConsKeyFromHex(hexutil.Encode(pubKey.Bytes())) } +func NewConsAddressAndKey() (sdk.ConsAddress, mock.PV) { + privVal := mock.NewPV() + pubKey, err := privVal.GetPubKey() + if err != nil { + return nil, mock.PV{} + } + return sdk.ConsAddress(pubKey.Address()), privVal +} + var _ keyring.Signer = &Signer{} // Signer defines a type that is used on testing for signing MsgEthereumTx diff --git a/testutil/utils.go b/testutil/utils.go index 52c189c7f..81b78d3b8 100644 --- a/testutil/utils.go +++ b/testutil/utils.go @@ -238,7 +238,7 @@ func (suite *BaseTestSuite) SetupWithGenesisValSet(genAccs []authtypes.GenesisAc suite.Require().NotNil(pubKey) pubKey2 := testutiltx.GenerateConsensusKey() suite.Require().NotNil(pubKey2) - chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(utils.DefaultChainID) + chainIDWithoutRevision := utils.ChainIDWithoutRevision(utils.DefaultChainID) operatorConsKeys := []operatortypes.OperatorConsKeyRecord{ { OperatorAddress: operator1.String(), diff --git a/utils/utils.go b/utils/utils.go index 176cb0a95..316c76897 100644 --- a/utils/utils.go +++ b/utils/utils.go @@ -7,6 +7,8 @@ import ( ibcclienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + abci "github.com/cometbft/cometbft/abci/types" + "github.com/evmos/evmos/v16/crypto/ethsecp256k1" "golang.org/x/exp/constraints" "golang.org/x/xerrors" @@ -210,3 +212,81 @@ func SortByPower( } return sortedOperatorAddrs, sortedPubKeys, sortedPowers } + +// AccumulateChanges accumulates the current and new validator updates and returns +// a list of unique validator updates. The list is sorted by power in descending order. +func AccumulateChanges( + currentChanges, newChanges []abci.ValidatorUpdate, +) []abci.ValidatorUpdate { + // get only unique updates + m := make(map[string]abci.ValidatorUpdate) + for _, change := range currentChanges { + m[change.PubKey.String()] = change + } + for _, change := range newChanges { + m[change.PubKey.String()] = change + } + + // convert to list + out := make([]abci.ValidatorUpdate, 0, len(m)) + for _, update := range m { + out = append(out, update) + } + + // The list of tendermint updates should hash the same across all consensus nodes + // that means it is necessary to sort for determinism. + sort.Slice(out, func(i, j int) bool { + if out[i].Power != out[j].Power { + return out[i].Power > out[j].Power + } + return out[i].PubKey.String() > out[j].PubKey.String() + }) + + return out +} + +// AppendMany appends a variable number of byte slices together +func AppendMany(slices ...[]byte) (out []byte) { + for _, slice := range slices { + out = append(out, slice...) + } + return out +} + +// ChainIDWithoutRevision returns the chainID without the revision number. +// For example, "exocoretestnet_233-1" returns "exocoretestnet_233". +// In the case of app chains, it is not used because upgrading the subscriber +// isn't handled yet, because, during an upgrade, it is not safe to assume +// that the same set of operators will continue with the new chainID. 
+// The coordinator upgrade also similarly needs to be +// designed and implemented, but that it should be a trivial fix like +// deploying a new IBC client. +func ChainIDWithoutRevision(chainID string) string { + if !ibcclienttypes.IsRevisionFormat(chainID) { + return chainID + } + splitStr := strings.Split(chainID, "-") + return splitStr[0] +} + +// ChainIDWithLenKey returns the key with the following format: +// bytePrefix | len(chainId) | chainId +// This is similar to Solidity's ABI encoding. +// The caller should typically append a constant length byte array to this and use it as a key. +func ChainIDWithLenKey(chainID string) []byte { + chainIDL := len(chainID) + return AppendMany( + // Append the chainID length + // #nosec G701 + sdk.Uint64ToBigEndian(uint64(chainIDL)), + // Append the chainID + []byte(chainID), + ) +} + +// PanicIfNil panics if the input is nil with the given message. +func PanicIfNil(x interface{}, msg string) { + if x == nil { + panic("zero or nil value for " + msg) + } +} diff --git a/x/appchain/common/types/common.pb.go b/x/appchain/common/types/common.pb.go index 88a7fba0b..217f78316 100644 --- a/x/appchain/common/types/common.pb.go +++ b/x/appchain/common/types/common.pb.go @@ -6,6 +6,8 @@ package types import ( fmt "fmt" types "github.com/cometbft/cometbft/abci/types" + _ "github.com/cosmos/cosmos-proto" + types1 "github.com/cosmos/cosmos-sdk/codec/types" _ "github.com/cosmos/cosmos-sdk/types/tx/amino" _ "github.com/cosmos/gogoproto/gogoproto" proto "github.com/cosmos/gogoproto/proto" @@ -40,7 +42,7 @@ type SubscriberParams struct { // the rewards from the subscriber to the coordinator. It is used in the event // that a channel between coordinator and subscriber exists prior to the // provision of security from Exocore to the appchain. Until a changeover - // process is implemented, it is currently unused. (TODO). The advantage + // process is implemented, it is currently unused (TODO). The advantage // of reusing a channel that was already in place is that the coin denomination // which contains a hash of the channel name will remain unchanged. DistributionTransmissionChannel string `protobuf:"bytes,2,opt,name=distribution_transmission_channel,json=distributionTransmissionChannel,proto3" json:"distribution_transmission_channel,omitempty"` @@ -259,7 +261,7 @@ type CoordinatorInfo struct { ClientState *_07_tendermint.ClientState `protobuf:"bytes,1,opt,name=client_state,json=clientState,proto3" json:"client_state,omitempty"` // consensus_state is the consensus state of the coordinator chain. ConsensusState *_07_tendermint.ConsensusState `protobuf:"bytes,2,opt,name=consensus_state,json=consensusState,proto3" json:"consensus_state,omitempty"` - // initial_val_set is the initial validator set of the coordinator chain. + // initial_val_set is the initial validator set of the subscriber chain. InitialValSet []types.ValidatorUpdate `protobuf:"bytes,3,rep,name=initial_val_set,json=initialValSet,proto3" json:"initial_val_set"` } @@ -317,10 +319,79 @@ func (m *CoordinatorInfo) GetInitialValSet() []types.ValidatorUpdate { return nil } +// SubscriberValidator is a validator structure on the subscriber chain. It is stored +// within the subscriber module, indexed by a prefix + consensus address, and +// within the coordinator module, indexed by a prefix + chain id + consensus address. +type SubscriberValidator struct { + // address, as derived from the consensus key. No correlation with the operator + // address on Exocore. 
+ ConsAddress []byte `protobuf:"bytes,1,opt,name=cons_address,json=consAddress,proto3" json:"cons_address,omitempty"` + // power is the vote power of the validator + Power int64 `protobuf:"varint,2,opt,name=power,proto3" json:"power,omitempty"` + // pubkey is the consensus public key of the validator, as a Protobuf Any. + // this type is chosen to match the x/staking/validator type. + Pubkey *types1.Any `protobuf:"bytes,3,opt,name=pubkey,proto3" json:"pubkey,omitempty" yaml:"consensus_pubkey"` +} + +func (m *SubscriberValidator) Reset() { *m = SubscriberValidator{} } +func (m *SubscriberValidator) String() string { return proto.CompactTextString(m) } +func (*SubscriberValidator) ProtoMessage() {} +func (*SubscriberValidator) Descriptor() ([]byte, []int) { + return fileDescriptor_71cb7b22d050d7a3, []int{3} +} +func (m *SubscriberValidator) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubscriberValidator) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SubscriberValidator.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SubscriberValidator) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubscriberValidator.Merge(m, src) +} +func (m *SubscriberValidator) XXX_Size() int { + return m.Size() +} +func (m *SubscriberValidator) XXX_DiscardUnknown() { + xxx_messageInfo_SubscriberValidator.DiscardUnknown(m) +} + +var xxx_messageInfo_SubscriberValidator proto.InternalMessageInfo + +func (m *SubscriberValidator) GetConsAddress() []byte { + if m != nil { + return m.ConsAddress + } + return nil +} + +func (m *SubscriberValidator) GetPower() int64 { + if m != nil { + return m.Power + } + return 0 +} + +func (m *SubscriberValidator) GetPubkey() *types1.Any { + if m != nil { + return m.Pubkey + } + return nil +} + func init() { proto.RegisterType((*SubscriberParams)(nil), "exocore.appchain.common.v1.SubscriberParams") proto.RegisterType((*SubscriberGenesisState)(nil), "exocore.appchain.common.v1.SubscriberGenesisState") proto.RegisterType((*CoordinatorInfo)(nil), "exocore.appchain.common.v1.CoordinatorInfo") + proto.RegisterType((*SubscriberValidator)(nil), "exocore.appchain.common.v1.SubscriberValidator") } func init() { @@ -328,59 +399,66 @@ func init() { } var fileDescriptor_71cb7b22d050d7a3 = []byte{ - // 819 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0xc1, 0x6e, 0x1c, 0x45, - 0x10, 0xf5, 0xc4, 0xc1, 0x24, 0xbd, 0x09, 0xbb, 0x6e, 0x25, 0xf1, 0x64, 0x81, 0xf5, 0xda, 0x42, - 0xc2, 0x22, 0x30, 0xa3, 0x18, 0x09, 0x09, 0x71, 0x01, 0xdb, 0x09, 0x8a, 0x85, 0x8c, 0xb5, 0x1b, - 0x82, 0x04, 0x12, 0xad, 0x9e, 0x9e, 0xda, 0xd9, 0x22, 0xb3, 0xdd, 0xa3, 0xee, 0x1e, 0x3b, 0xfc, - 0x02, 0x27, 0x8e, 0x7c, 0x02, 0x47, 0xae, 0xfc, 0x41, 0x8e, 0x39, 0x72, 0x0a, 0xc8, 0x3e, 0xf0, - 0x0f, 0x9c, 0x50, 0xf7, 0xcc, 0x78, 0x67, 0x1d, 0x19, 0xe7, 0xb2, 0xea, 0xee, 0x7a, 0xf5, 0xea, - 0x55, 0xd5, 0x56, 0x0d, 0x79, 0x1f, 0x9e, 0x29, 0xa1, 0x34, 0xc4, 0xbc, 0x28, 0xc4, 0x94, 0xa3, - 0x8c, 0x85, 0x9a, 0xcd, 0x94, 0x8c, 0x8f, 0xee, 0xd7, 0xa7, 0xa8, 0xd0, 0xca, 0x2a, 0xda, 0xaf, - 0x81, 0x51, 0x03, 0x8c, 0x6a, 0xf3, 0xd1, 0xfd, 0xfe, 0x2a, 0x9f, 0xa1, 0x54, 0xb1, 0xff, 0xad, - 0xe0, 0xfd, 0x5b, 0x99, 0xca, 0x94, 0x3f, 0xc6, 0xee, 0x54, 0xbf, 0x0e, 0x32, 0xa5, 0xb2, 0x1c, - 0x62, 0x7f, 0x4b, 0xca, 0x49, 0x9c, 0x96, 0x9a, 0x5b, 0x6c, 0x82, 0xf4, 0x63, 0x4c, 0x44, 
0x9c, - 0x63, 0x36, 0xb5, 0x22, 0x47, 0x90, 0xd6, 0xc4, 0x16, 0x64, 0x0a, 0x7a, 0x86, 0xd2, 0x3a, 0x45, - 0xf3, 0x5b, 0xed, 0xf0, 0x76, 0xcb, 0xce, 0x13, 0x81, 0xb1, 0xfd, 0xa9, 0x00, 0x53, 0x19, 0x37, - 0xff, 0x5d, 0x21, 0xbd, 0x71, 0x99, 0x18, 0xa1, 0x31, 0x01, 0x7d, 0xc8, 0x35, 0x9f, 0x19, 0xfa, - 0x39, 0x79, 0x57, 0x28, 0xa5, 0x53, 0x94, 0xdc, 0x2a, 0xcd, 0x26, 0x00, 0xac, 0x50, 0x2a, 0x67, - 0x3c, 0x4d, 0x35, 0x33, 0x56, 0x87, 0xc1, 0x30, 0xd8, 0xba, 0x3e, 0xba, 0xdb, 0x02, 0x3d, 0x04, - 0x38, 0x54, 0x2a, 0xff, 0x22, 0x4d, 0xf5, 0xd8, 0x6a, 0xba, 0x4f, 0x36, 0x52, 0x34, 0x56, 0x63, - 0x52, 0x3a, 0xe9, 0xcc, 0x6a, 0x2e, 0xcd, 0x0c, 0x8d, 0x71, 0x17, 0x31, 0xe5, 0x52, 0x42, 0x1e, - 0x5e, 0xf1, 0x2c, 0xeb, 0x6d, 0xe0, 0xe3, 0x16, 0x6e, 0xb7, 0x82, 0xd1, 0xaf, 0xc9, 0x7b, 0x49, - 0xae, 0xc4, 0x53, 0xc3, 0x0a, 0xd0, 0xec, 0x42, 0xda, 0x70, 0x79, 0x18, 0x6c, 0x2d, 0x8f, 0x36, - 0x2a, 0xec, 0x21, 0xe8, 0xbd, 0x0b, 0x78, 0xe9, 0x57, 0x64, 0xd3, 0x9c, 0xa5, 0xcc, 0x34, 0x2c, - 0x50, 0x4e, 0x34, 0x17, 0xee, 0x10, 0x5e, 0xf5, 0xea, 0x86, 0x73, 0xe4, 0x68, 0x01, 0xf8, 0xb0, - 0xc6, 0xd1, 0x0d, 0x72, 0x43, 0xc3, 0x31, 0xd7, 0x29, 0x4b, 0x41, 0xaa, 0x59, 0xf8, 0x86, 0xf7, - 0xeb, 0x54, 0x6f, 0x7b, 0xee, 0x89, 0x02, 0xa1, 0x98, 0x08, 0x66, 0x71, 0x06, 0xaa, 0xb4, 0x2e, - 0x0d, 0x54, 0x69, 0xb8, 0x32, 0x0c, 0xb6, 0x3a, 0xdb, 0x77, 0xa3, 0xaa, 0xdf, 0x51, 0xd3, 0xef, - 0x68, 0xaf, 0xee, 0xf7, 0xce, 0x3b, 0xcf, 0x5f, 0xae, 0x2f, 0x9d, 0xbc, 0x5c, 0xef, 0x3d, 0xda, - 0xd9, 0x7d, 0x5c, 0xf9, 0x1e, 0x7a, 0xd7, 0x5f, 0xff, 0x5a, 0x0f, 0x46, 0x3d, 0x4c, 0xc4, 0xc2, - 0x2b, 0xfd, 0x9e, 0xac, 0xf9, 0x82, 0x4c, 0x40, 0x9f, 0x8f, 0xf5, 0xe6, 0x65, 0xb1, 0xae, 0xb9, - 0x58, 0x9e, 0xf7, 0x76, 0xc3, 0xb1, 0x48, 0x7e, 0x40, 0x7a, 0xa5, 0x4c, 0x94, 0x4c, 0x51, 0x66, - 0x0d, 0xeb, 0xb5, 0xd7, 0x67, 0xed, 0x9e, 0x39, 0xd7, 0x7c, 0x1f, 0x11, 0x3a, 0x45, 0x63, 0x95, - 0x46, 0xc1, 0x73, 0x06, 0xd2, 0x6a, 0x04, 0x13, 0x5e, 0xf7, 0x3d, 0x5c, 0x9d, 0x5b, 0x1e, 0x54, - 0x06, 0xfa, 0x09, 0x59, 0x33, 0x39, 0x37, 0xd3, 0xb3, 0xfe, 0xb0, 0x54, 0x1d, 0x4b, 0x97, 0x65, - 0xd8, 0xf5, 0x05, 0xbf, 0xed, 0xcd, 0x4d, 0x57, 0xf6, 0x6a, 0x23, 0xfd, 0x81, 0xdc, 0x69, 0x80, - 0xec, 0x47, 0x8e, 0x39, 0x6b, 0xa6, 0x29, 0xec, 0x5d, 0x26, 0xfe, 0x66, 0x23, 0xfe, 0xb7, 0x7f, - 0x7e, 0xff, 0x20, 0x18, 0xdd, 0x6a, 0x78, 0xf6, 0x39, 0xe6, 0x0d, 0x88, 0x7e, 0x46, 0xfa, 0xaf, - 0xe8, 0x2a, 0x93, 0x1c, 0x98, 0xc1, 0x4c, 0x86, 0xab, 0x5e, 0xda, 0xda, 0x39, 0x69, 0xce, 0x3e, - 0xc6, 0x4c, 0x6e, 0xfe, 0x11, 0x90, 0x3b, 0xf3, 0xe1, 0xfb, 0x12, 0x24, 0x18, 0x34, 0x63, 0xcb, - 0x2d, 0xd0, 0x7d, 0xb2, 0x52, 0xf8, 0x61, 0xf4, 0xb3, 0xd6, 0xd9, 0xfe, 0x30, 0xba, 0x78, 0xb7, - 0x44, 0xe7, 0x07, 0x78, 0xe7, 0xaa, 0x93, 0x3e, 0xaa, 0x19, 0xe8, 0x98, 0x74, 0x5a, 0x93, 0xea, - 0xc7, 0xae, 0xb3, 0x7d, 0xef, 0xff, 0x08, 0x77, 0xe7, 0xf0, 0x47, 0x72, 0xa2, 0x6a, 0xbe, 0x36, - 0xcb, 0xe6, 0xcf, 0x57, 0x48, 0xf7, 0x1c, 0x8c, 0x1e, 0x90, 0x1b, 0xd5, 0x4e, 0x62, 0xc6, 0x25, - 0x51, 0x4b, 0xbf, 0x17, 0x61, 0x22, 0xa2, 0xf6, 0xc6, 0x8a, 0x5a, 0x3b, 0xca, 0x45, 0xf3, 0xaf, - 0x3e, 0xef, 0x51, 0x47, 0xcc, 0x2f, 0xf4, 0x5b, 0xd2, 0x15, 0x4a, 0x1a, 0x90, 0xa6, 0x34, 0x35, - 0x65, 0x25, 0x3e, 0xba, 0x94, 0xb2, 0x71, 0xab, 0x58, 0xdf, 0x12, 0x0b, 0x77, 0x7a, 0x40, 0xba, - 0x28, 0xd1, 0x22, 0xcf, 0xd9, 0x11, 0xcf, 0x99, 0x01, 0x1b, 0x2e, 0x0f, 0x97, 0xb7, 0x3a, 0xdb, - 0xc3, 0x36, 0x8f, 0x5b, 0x96, 0xd1, 0x13, 0x9e, 0x63, 0xea, 0x32, 0xfc, 0xa6, 0x48, 0xb9, 0x85, - 0xba, 0x14, 0x37, 0x6b, 0xf7, 0x27, 0x3c, 0x1f, 0x83, 0xdd, 0x19, 0x3f, 0x3f, 0x19, 0x04, 0x2f, - 0x4e, 0x06, 0xc1, 
0xdf, 0x27, 0x83, 0xe0, 0x97, 0xd3, 0xc1, 0xd2, 0x8b, 0xd3, 0xc1, 0xd2, 0x9f, - 0xa7, 0x83, 0xa5, 0xef, 0x3e, 0xcd, 0xd0, 0x4e, 0xcb, 0xc4, 0xd5, 0x36, 0x7e, 0x50, 0x15, 0xfc, - 0x00, 0xec, 0xb1, 0xd2, 0x4f, 0xe3, 0xe6, 0xab, 0xf2, 0xec, 0x95, 0xef, 0x8a, 0x5f, 0xd0, 0xc9, - 0x8a, 0xff, 0x4b, 0x7e, 0xfc, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb7, 0xc6, 0x07, 0xf5, 0x7f, - 0x06, 0x00, 0x00, + // 930 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x55, 0x4f, 0x6f, 0x1c, 0x35, + 0x14, 0xcf, 0x34, 0x6d, 0x68, 0xbd, 0x29, 0xd9, 0x98, 0x6d, 0x33, 0x59, 0x60, 0x77, 0xb3, 0x42, + 0x22, 0xa2, 0x74, 0x46, 0xdd, 0x4a, 0x48, 0xc0, 0x85, 0x6c, 0xd2, 0xa2, 0x06, 0x14, 0x56, 0xbb, + 0xa5, 0x48, 0x20, 0x61, 0x79, 0x3c, 0xde, 0x5d, 0x93, 0x19, 0x7b, 0x64, 0x7b, 0x92, 0xee, 0x57, + 0xe0, 0xc4, 0x91, 0x8f, 0xc0, 0x11, 0x24, 0x2e, 0x7c, 0x83, 0x8a, 0x53, 0x8f, 0x9c, 0x02, 0x4a, + 0x0e, 0xdc, 0x39, 0x72, 0x42, 0xf6, 0x78, 0xb2, 0x7f, 0xa2, 0x90, 0x5e, 0x46, 0xf3, 0xfc, 0x7e, + 0xef, 0xe7, 0xdf, 0xf3, 0xf3, 0x7b, 0x06, 0xef, 0xd2, 0xe7, 0x82, 0x08, 0x49, 0x43, 0x9c, 0x65, + 0x64, 0x8c, 0x19, 0x0f, 0x89, 0x48, 0x53, 0xc1, 0xc3, 0xa3, 0x07, 0xee, 0x2f, 0xc8, 0xa4, 0xd0, + 0x02, 0xd6, 0x1d, 0x30, 0x28, 0x81, 0x81, 0x73, 0x1f, 0x3d, 0xa8, 0xaf, 0xe3, 0x94, 0x71, 0x11, + 0xda, 0x6f, 0x01, 0xaf, 0x6f, 0x12, 0xa1, 0x52, 0xa1, 0x90, 0xb5, 0xc2, 0xc2, 0x70, 0xae, 0xda, + 0x48, 0x8c, 0x44, 0xb1, 0x6e, 0xfe, 0xca, 0x80, 0x91, 0x10, 0xa3, 0x84, 0x86, 0xd6, 0x8a, 0xf2, + 0x61, 0x88, 0xf9, 0xc4, 0xb9, 0x1a, 0x8b, 0xae, 0x38, 0x97, 0x58, 0xb3, 0x52, 0x5a, 0x3d, 0x64, + 0x11, 0x09, 0x13, 0x36, 0x1a, 0x6b, 0x92, 0x30, 0xca, 0xb5, 0x0a, 0x35, 0xe5, 0x31, 0x95, 0x29, + 0xe3, 0xda, 0xe4, 0x31, 0xb5, 0x5c, 0xc0, 0x9b, 0x33, 0x7e, 0x1c, 0x11, 0x16, 0xea, 0x49, 0x46, + 0x9d, 0xbc, 0xf6, 0xbf, 0x2b, 0xa0, 0x3a, 0xc8, 0x23, 0x45, 0x24, 0x8b, 0xa8, 0xec, 0x61, 0x89, + 0x53, 0x05, 0x3f, 0x01, 0x6f, 0x13, 0x21, 0x64, 0xcc, 0x38, 0xd6, 0x42, 0xa2, 0x21, 0xa5, 0x28, + 0x13, 0x22, 0x41, 0x38, 0x8e, 0x25, 0x52, 0x5a, 0xfa, 0x5e, 0xcb, 0xdb, 0xbe, 0xd5, 0xdf, 0x9c, + 0x01, 0x3d, 0xa6, 0xb4, 0x27, 0x44, 0xb2, 0x13, 0xc7, 0x72, 0xa0, 0x25, 0xdc, 0x07, 0x5b, 0x31, + 0x53, 0x5a, 0xb2, 0x28, 0x37, 0xd2, 0x91, 0x96, 0x98, 0xab, 0x94, 0x29, 0x65, 0x0c, 0x32, 0xc6, + 0x9c, 0xd3, 0xc4, 0xbf, 0x66, 0x59, 0x9a, 0xb3, 0xc0, 0xa7, 0x33, 0xb8, 0xdd, 0x02, 0x06, 0xbf, + 0x00, 0xef, 0x44, 0x89, 0x20, 0x87, 0x0a, 0x65, 0x54, 0xa2, 0x4b, 0x69, 0xfd, 0xe5, 0x96, 0xb7, + 0xbd, 0xdc, 0xdf, 0x2a, 0xb0, 0x3d, 0x2a, 0xf7, 0x2e, 0xe1, 0x85, 0x9f, 0x83, 0xb6, 0x3a, 0x4f, + 0x19, 0x49, 0x3a, 0x47, 0x39, 0x94, 0x98, 0x98, 0x1f, 0xff, 0xba, 0x55, 0xd7, 0x9a, 0x22, 0xfb, + 0x73, 0xc0, 0xc7, 0x0e, 0x07, 0xb7, 0xc0, 0xaa, 0xa4, 0xc7, 0x58, 0xc6, 0x28, 0xa6, 0x5c, 0xa4, + 0xfe, 0x0d, 0x1b, 0x57, 0x29, 0xd6, 0xf6, 0xcc, 0x12, 0xa4, 0x00, 0xb2, 0x88, 0x20, 0xcd, 0x52, + 0x2a, 0x72, 0x6d, 0xd2, 0x60, 0x22, 0xf6, 0x57, 0x5a, 0xde, 0x76, 0xa5, 0xb3, 0x19, 0x14, 0xf5, + 0x0e, 0xca, 0x7a, 0x07, 0x7b, 0xae, 0xde, 0xdd, 0xb7, 0x5e, 0x9c, 0x34, 0x97, 0x4e, 0x4f, 0x9a, + 0xd5, 0x27, 0xdd, 0xdd, 0xa7, 0x45, 0x6c, 0xcf, 0x86, 0xfe, 0xf8, 0x67, 0xd3, 0xeb, 0x57, 0x59, + 0x44, 0xe6, 0x56, 0xe1, 0x37, 0x60, 0xc3, 0x1e, 0xc8, 0x90, 0xca, 0xc5, 0xbd, 0x5e, 0xbb, 0x6a, + 0xaf, 0x9b, 0x66, 0x2f, 0xcb, 0x7b, 0xa7, 0xe4, 0x98, 0x27, 0x3f, 0x00, 0xd5, 0x9c, 0x47, 0x82, + 0xc7, 0x8c, 0x8f, 0x4a, 0xd6, 0x9b, 0xaf, 0xce, 0xba, 0x76, 0x1e, 0xec, 0xf8, 0xee, 0x03, 0x38, + 0x66, 0x4a, 0x0b, 0xc9, 0x08, 0x4e, 0x10, 0xe5, 0x5a, 0x32, 0xaa, 0xfc, 
0x5b, 0xb6, 0x86, 0xeb, + 0x53, 0xcf, 0xa3, 0xc2, 0x01, 0x3f, 0x00, 0x1b, 0x2a, 0xc1, 0x6a, 0x7c, 0x5e, 0x1f, 0x14, 0x8b, + 0x63, 0x6e, 0xb2, 0xf4, 0xd7, 0xec, 0x81, 0xdf, 0xb1, 0xee, 0xb2, 0x2a, 0x7b, 0xce, 0x09, 0xbf, + 0x05, 0x77, 0x4b, 0x20, 0xfa, 0x0e, 0xb3, 0x04, 0x95, 0xdd, 0xe4, 0x57, 0xaf, 0x12, 0x7f, 0xbb, + 0x14, 0xff, 0xd3, 0xdf, 0x3f, 0xbf, 0xe7, 0xf5, 0x6b, 0x25, 0xcf, 0x3e, 0x66, 0x49, 0x09, 0x82, + 0x1f, 0x83, 0xfa, 0x05, 0x5d, 0x79, 0x94, 0x50, 0xa4, 0xd8, 0x88, 0xfb, 0xeb, 0x56, 0xda, 0xc6, + 0x82, 0x34, 0xe3, 0x1f, 0xb0, 0x11, 0x6f, 0xff, 0xe6, 0x81, 0xbb, 0xd3, 0xe6, 0xfb, 0x94, 0x72, + 0xaa, 0x98, 0x1a, 0x68, 0xac, 0x29, 0xdc, 0x07, 0x2b, 0x99, 0x6d, 0x46, 0xdb, 0x6b, 0x95, 0xce, + 0xfb, 0xc1, 0xe5, 0x13, 0x29, 0x58, 0x6c, 0xe0, 0xee, 0x75, 0x23, 0xbd, 0xef, 0x18, 0xe0, 0x00, + 0x54, 0x66, 0x3a, 0xd5, 0xb6, 0x5d, 0xa5, 0x73, 0xef, 0xff, 0x08, 0x77, 0xa7, 0xf0, 0x27, 0x7c, + 0x28, 0x1c, 0xdf, 0x2c, 0x4b, 0xfb, 0xfb, 0x6b, 0x60, 0x6d, 0x01, 0x06, 0x0f, 0xc0, 0x6a, 0x31, + 0x93, 0x90, 0x32, 0x49, 0x38, 0xe9, 0xf7, 0x02, 0x16, 0x91, 0x60, 0x76, 0x62, 0x05, 0x33, 0x33, + 0xca, 0xec, 0x66, 0x57, 0x6d, 0xde, 0xfd, 0x0a, 0x99, 0x1a, 0xf0, 0x2b, 0xb0, 0x46, 0x04, 0x57, + 0x94, 0xab, 0x5c, 0x39, 0xca, 0x42, 0x7c, 0x70, 0x25, 0x65, 0x19, 0x56, 0xb0, 0xbe, 0x4e, 0xe6, + 0x6c, 0x78, 0x00, 0xd6, 0x18, 0x67, 0x9a, 0xe1, 0x04, 0x1d, 0xe1, 0x04, 0x29, 0xaa, 0xfd, 0xe5, + 0xd6, 0xf2, 0x76, 0xa5, 0xd3, 0x9a, 0xe5, 0x31, 0xc3, 0x32, 0x78, 0x86, 0x13, 0x16, 0x9b, 0x0c, + 0xbf, 0xcc, 0x62, 0xac, 0xa9, 0x3b, 0x8a, 0xdb, 0x2e, 0xfc, 0x19, 0x4e, 0x06, 0x54, 0xb7, 0x7f, + 0xf1, 0xc0, 0x1b, 0xd3, 0x22, 0x9c, 0x87, 0x98, 0xd9, 0x60, 0x76, 0xb6, 0x83, 0x93, 0xaa, 0xa2, + 0x96, 0xab, 0xe6, 0x1c, 0xb9, 0xda, 0x29, 0x96, 0x60, 0x0d, 0xdc, 0xc8, 0xc4, 0x31, 0x2d, 0xca, + 0xb2, 0xdc, 0x2f, 0x0c, 0x88, 0xc1, 0x4a, 0x96, 0x47, 0x87, 0x74, 0x62, 0xa7, 0x5a, 0xa5, 0x53, + 0xbb, 0x70, 0x4d, 0x77, 0xf8, 0xa4, 0xfb, 0xf0, 0x9f, 0x93, 0xe6, 0xc6, 0x04, 0xa7, 0xc9, 0x47, + 0xed, 0xe9, 0x39, 0x15, 0x71, 0xed, 0xdf, 0x7f, 0xbd, 0x5f, 0x73, 0x0f, 0x11, 0x91, 0x93, 0x4c, + 0x8b, 0xa0, 0x97, 0x47, 0x9f, 0xd1, 0x49, 0xdf, 0x11, 0x77, 0x07, 0x2f, 0x4e, 0x1b, 0xde, 0xcb, + 0xd3, 0x86, 0xf7, 0xd7, 0x69, 0xc3, 0xfb, 0xe1, 0xac, 0xb1, 0xf4, 0xf2, 0xac, 0xb1, 0xf4, 0xc7, + 0x59, 0x63, 0xe9, 0xeb, 0x0f, 0x47, 0x4c, 0x8f, 0xf3, 0xc8, 0xdc, 0x87, 0xf0, 0x51, 0x71, 0x49, + 0x0e, 0xa8, 0x3e, 0x16, 0xf2, 0x30, 0x2c, 0xdf, 0xcf, 0xe7, 0x17, 0x5e, 0x50, 0xfb, 0xa8, 0x44, + 0x2b, 0x56, 0xdf, 0xc3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x4e, 0x95, 0x63, 0x69, 0x07, + 0x00, 0x00, } func (m *SubscriberParams) Marshal() (dAtA []byte, err error) { @@ -598,6 +676,53 @@ func (m *CoordinatorInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SubscriberValidator) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscriberValidator) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubscriberValidator) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pubkey != nil { + { + size, err := m.Pubkey.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCommon(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Power != 0 { + i = encodeVarintCommon(dAtA, i, uint64(m.Power)) + 
i-- + dAtA[i] = 0x10 + } + if len(m.ConsAddress) > 0 { + i -= len(m.ConsAddress) + copy(dAtA[i:], m.ConsAddress) + i = encodeVarintCommon(dAtA, i, uint64(len(m.ConsAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintCommon(dAtA []byte, offset int, v uint64) int { offset -= sovCommon(v) base := offset @@ -692,6 +817,26 @@ func (m *CoordinatorInfo) Size() (n int) { return n } +func (m *SubscriberValidator) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConsAddress) + if l > 0 { + n += 1 + l + sovCommon(uint64(l)) + } + if m.Power != 0 { + n += 1 + sovCommon(uint64(m.Power)) + } + if m.Pubkey != nil { + l = m.Pubkey.Size() + n += 1 + l + sovCommon(uint64(l)) + } + return n +} + func sovCommon(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1382,6 +1527,145 @@ func (m *CoordinatorInfo) Unmarshal(dAtA []byte) error { } return nil } +func (m *SubscriberValidator) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscriberValidator: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscriberValidator: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsAddress", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConsAddress = append(m.ConsAddress[:0], dAtA[iNdEx:postIndex]...) 
+ if m.ConsAddress == nil { + m.ConsAddress = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Power", wireType) + } + m.Power = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Power |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pubkey", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCommon + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCommon + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCommon + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pubkey == nil { + m.Pubkey = &types1.Any{} + } + if err := m.Pubkey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCommon(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCommon + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipCommon(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/x/appchain/common/types/errors.go b/x/appchain/common/types/errors.go new file mode 100644 index 000000000..4031161ce --- /dev/null +++ b/x/appchain/common/types/errors.go @@ -0,0 +1,36 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" +) + +var ( + ErrInvalidChannelFlow = errorsmod.Register( + ModuleName, 2, + "invalid message sent to channel end", + ) + ErrDuplicateChannel = errorsmod.Register( + ModuleName, 3, + "channel already exists", + ) + ErrInvalidVersion = errorsmod.Register( + ModuleName, 4, + "invalid version", + ) + ErrInvalidHandshakeMetadata = errorsmod.Register( + ModuleName, 5, + "invalid handshake metadata", + ) + ErrChannelNotFound = errorsmod.Register( + ModuleName, 6, + "channel not found", + ) + ErrClientNotFound = errorsmod.Register( + ModuleName, 7, + "client not found", + ) + ErrInvalidPacketData = errorsmod.Register( + ModuleName, 8, + "invalid packet data (but successfully unmarshalled)", + ) +) diff --git a/x/appchain/common/types/events.go b/x/appchain/common/types/events.go index 5d8899972..35a14bceb 100644 --- a/x/appchain/common/types/events.go +++ b/x/appchain/common/types/events.go @@ -1,5 +1,15 @@ package types const ( - AttributeChainID = "chain_id" + AttributeChainID = "chain_id" + AttributeKeyAckSuccess = "success" + AttributeKeyAck = "acknowledgement" + AttributeKeyAckError = "ack_error" + AttributeInfractionType = "infraction_type" + AttributeValidatorAddress = "validator_address" + AttributeValSetUpdateID = "valset_update_id" + + EventTypeChannelEstablished = "channel_established" + EventTypePacket = "common_packet" + EventTypeTimeout = "common_timeout" ) diff --git a/x/appchain/common/types/expected_keepers.go b/x/appchain/common/types/expected_keepers.go index 57f9416ae..15eb8245e 100644 --- a/x/appchain/common/types/expected_keepers.go +++ b/x/appchain/common/types/expected_keepers.go @@ -1,10 +1,19 @@ package types import ( + "context" + sdk 
"github.com/cosmos/cosmos-sdk/types" + auth "github.com/cosmos/cosmos-sdk/x/auth/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + conntypes "github.com/cosmos/ibc-go/v7/modules/core/03-connection/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported" ) +// ClientKeeper defines the expected IBC client keeper type ClientKeeper interface { CreateClient( sdk.Context, ibcexported.ClientState, ibcexported.ConsensusState, @@ -17,3 +26,71 @@ type ClientKeeper interface { sdk.Context, ibcexported.Height, ) (ibcexported.ConsensusState, error) } + +// ScopedKeeper defines the expected IBC capability keeper +type ScopedKeeper interface { + GetCapability(sdk.Context, string) (*capabilitytypes.Capability, bool) + AuthenticateCapability(sdk.Context, *capabilitytypes.Capability, string) bool + ClaimCapability(sdk.Context, *capabilitytypes.Capability, string) error +} + +// PortKeeper defines the expected IBC port keeper +type PortKeeper interface { + BindPort(ctx sdk.Context, portID string) *capabilitytypes.Capability +} + +// ConnectionKeeper defines the expected IBC connection keeper +type ConnectionKeeper interface { + GetConnection(ctx sdk.Context, connectionID string) (conntypes.ConnectionEnd, bool) +} + +// ChannelKeeper defines the expected IBC channel keeper +type ChannelKeeper interface { + GetChannel(sdk.Context, string, string) (channeltypes.Channel, bool) + GetNextSequenceSend(sdk.Context, string, string) (uint64, bool) + SendPacket( + sdk.Context, *capabilitytypes.Capability, + string, string, clienttypes.Height, + uint64, []byte, + ) (uint64, error) + WriteAcknowledgement( + sdk.Context, *capabilitytypes.Capability, + ibcexported.PacketI, ibcexported.Acknowledgement, + ) error + ChanCloseInit( + sdk.Context, string, string, *capabilitytypes.Capability, + ) error + GetChannelConnection(sdk.Context, string, string) (string, ibcexported.ConnectionI, error) +} + +// IBCKeeper defines the expected interface needed for openning a +// channel +type IBCCoreKeeper interface { + ChannelOpenInit( + context.Context, *channeltypes.MsgChannelOpenInit, + ) (*channeltypes.MsgChannelOpenInitResponse, error) +} + +// AccountKeeper defines the expected account keeper +type AccountKeeper interface { + GetModuleAccount(ctx sdk.Context, name string) auth.ModuleAccountI +} + +// BankKeeper defines the expected bank keeper +type BankKeeper interface { + GetBalance(ctx sdk.Context, addr sdk.AccAddress, denom string) sdk.Coin + GetAllBalances(ctx sdk.Context, addr sdk.AccAddress) sdk.Coins + SendCoinsFromModuleToModule( + ctx sdk.Context, + senderModule, recipientModule string, + amt sdk.Coins, + ) error +} + +// IBCTransferKeeper defines the expected IBC transfer keeper +type IBCTransferKeeper interface { + Transfer( + context.Context, + *transfertypes.MsgTransfer, + ) (*transfertypes.MsgTransferResponse, error) +} diff --git a/x/appchain/common/types/expected_keepers_mocks.go b/x/appchain/common/types/expected_keepers_mocks.go new file mode 100644 index 000000000..4627925fd --- /dev/null +++ b/x/appchain/common/types/expected_keepers_mocks.go @@ -0,0 +1,539 @@ +// Code generated by MockGen. DO NOT EDIT. 
+// Source: x/appchain/common/types/expected_keepers.go +// +// Generated by this command: +// +// mockgen -source=x/appchain/common/types/expected_keepers.go -destination=x/appchain/common/types/expected_keepers_mocks.go -package=types +// + +// Package types is a generated GoMock package. +package types + +import ( + context "context" + reflect "reflect" + + types "github.com/cosmos/cosmos-sdk/types" + types0 "github.com/cosmos/cosmos-sdk/x/auth/types" + types1 "github.com/cosmos/cosmos-sdk/x/capability/types" + types2 "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + types3 "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + types4 "github.com/cosmos/ibc-go/v7/modules/core/03-connection/types" + types5 "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + exported "github.com/cosmos/ibc-go/v7/modules/core/exported" + gomock "go.uber.org/mock/gomock" +) + +// MockClientKeeper is a mock of ClientKeeper interface. +type MockClientKeeper struct { + ctrl *gomock.Controller + recorder *MockClientKeeperMockRecorder +} + +// MockClientKeeperMockRecorder is the mock recorder for MockClientKeeper. +type MockClientKeeperMockRecorder struct { + mock *MockClientKeeper +} + +// NewMockClientKeeper creates a new mock instance. +func NewMockClientKeeper(ctrl *gomock.Controller) *MockClientKeeper { + mock := &MockClientKeeper{ctrl: ctrl} + mock.recorder = &MockClientKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockClientKeeper) EXPECT() *MockClientKeeperMockRecorder { + return m.recorder +} + +// CreateClient mocks base method. +func (m *MockClientKeeper) CreateClient(arg0 types.Context, arg1 exported.ClientState, arg2 exported.ConsensusState) (string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateClient", arg0, arg1, arg2) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateClient indicates an expected call of CreateClient. +func (mr *MockClientKeeperMockRecorder) CreateClient(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateClient", reflect.TypeOf((*MockClientKeeper)(nil).CreateClient), arg0, arg1, arg2) +} + +// GetClientState mocks base method. +func (m *MockClientKeeper) GetClientState(arg0 types.Context, arg1 string) (exported.ClientState, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetClientState", arg0, arg1) + ret0, _ := ret[0].(exported.ClientState) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetClientState indicates an expected call of GetClientState. +func (mr *MockClientKeeperMockRecorder) GetClientState(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClientState", reflect.TypeOf((*MockClientKeeper)(nil).GetClientState), arg0, arg1) +} + +// GetLatestClientConsensusState mocks base method. +func (m *MockClientKeeper) GetLatestClientConsensusState(arg0 types.Context, arg1 string) (exported.ConsensusState, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLatestClientConsensusState", arg0, arg1) + ret0, _ := ret[0].(exported.ConsensusState) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetLatestClientConsensusState indicates an expected call of GetLatestClientConsensusState. 
+func (mr *MockClientKeeperMockRecorder) GetLatestClientConsensusState(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLatestClientConsensusState", reflect.TypeOf((*MockClientKeeper)(nil).GetLatestClientConsensusState), arg0, arg1) +} + +// GetSelfConsensusState mocks base method. +func (m *MockClientKeeper) GetSelfConsensusState(arg0 types.Context, arg1 exported.Height) (exported.ConsensusState, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetSelfConsensusState", arg0, arg1) + ret0, _ := ret[0].(exported.ConsensusState) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetSelfConsensusState indicates an expected call of GetSelfConsensusState. +func (mr *MockClientKeeperMockRecorder) GetSelfConsensusState(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSelfConsensusState", reflect.TypeOf((*MockClientKeeper)(nil).GetSelfConsensusState), arg0, arg1) +} + +// MockScopedKeeper is a mock of ScopedKeeper interface. +type MockScopedKeeper struct { + ctrl *gomock.Controller + recorder *MockScopedKeeperMockRecorder +} + +// MockScopedKeeperMockRecorder is the mock recorder for MockScopedKeeper. +type MockScopedKeeperMockRecorder struct { + mock *MockScopedKeeper +} + +// NewMockScopedKeeper creates a new mock instance. +func NewMockScopedKeeper(ctrl *gomock.Controller) *MockScopedKeeper { + mock := &MockScopedKeeper{ctrl: ctrl} + mock.recorder = &MockScopedKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockScopedKeeper) EXPECT() *MockScopedKeeperMockRecorder { + return m.recorder +} + +// AuthenticateCapability mocks base method. +func (m *MockScopedKeeper) AuthenticateCapability(arg0 types.Context, arg1 *types1.Capability, arg2 string) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "AuthenticateCapability", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + return ret0 +} + +// AuthenticateCapability indicates an expected call of AuthenticateCapability. +func (mr *MockScopedKeeperMockRecorder) AuthenticateCapability(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AuthenticateCapability", reflect.TypeOf((*MockScopedKeeper)(nil).AuthenticateCapability), arg0, arg1, arg2) +} + +// ClaimCapability mocks base method. +func (m *MockScopedKeeper) ClaimCapability(arg0 types.Context, arg1 *types1.Capability, arg2 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ClaimCapability", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// ClaimCapability indicates an expected call of ClaimCapability. +func (mr *MockScopedKeeperMockRecorder) ClaimCapability(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClaimCapability", reflect.TypeOf((*MockScopedKeeper)(nil).ClaimCapability), arg0, arg1, arg2) +} + +// GetCapability mocks base method. +func (m *MockScopedKeeper) GetCapability(arg0 types.Context, arg1 string) (*types1.Capability, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetCapability", arg0, arg1) + ret0, _ := ret[0].(*types1.Capability) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetCapability indicates an expected call of GetCapability. 
+func (mr *MockScopedKeeperMockRecorder) GetCapability(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetCapability", reflect.TypeOf((*MockScopedKeeper)(nil).GetCapability), arg0, arg1) +} + +// MockPortKeeper is a mock of PortKeeper interface. +type MockPortKeeper struct { + ctrl *gomock.Controller + recorder *MockPortKeeperMockRecorder +} + +// MockPortKeeperMockRecorder is the mock recorder for MockPortKeeper. +type MockPortKeeperMockRecorder struct { + mock *MockPortKeeper +} + +// NewMockPortKeeper creates a new mock instance. +func NewMockPortKeeper(ctrl *gomock.Controller) *MockPortKeeper { + mock := &MockPortKeeper{ctrl: ctrl} + mock.recorder = &MockPortKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockPortKeeper) EXPECT() *MockPortKeeperMockRecorder { + return m.recorder +} + +// BindPort mocks base method. +func (m *MockPortKeeper) BindPort(ctx types.Context, portID string) *types1.Capability { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "BindPort", ctx, portID) + ret0, _ := ret[0].(*types1.Capability) + return ret0 +} + +// BindPort indicates an expected call of BindPort. +func (mr *MockPortKeeperMockRecorder) BindPort(ctx, portID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BindPort", reflect.TypeOf((*MockPortKeeper)(nil).BindPort), ctx, portID) +} + +// MockConnectionKeeper is a mock of ConnectionKeeper interface. +type MockConnectionKeeper struct { + ctrl *gomock.Controller + recorder *MockConnectionKeeperMockRecorder +} + +// MockConnectionKeeperMockRecorder is the mock recorder for MockConnectionKeeper. +type MockConnectionKeeperMockRecorder struct { + mock *MockConnectionKeeper +} + +// NewMockConnectionKeeper creates a new mock instance. +func NewMockConnectionKeeper(ctrl *gomock.Controller) *MockConnectionKeeper { + mock := &MockConnectionKeeper{ctrl: ctrl} + mock.recorder = &MockConnectionKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockConnectionKeeper) EXPECT() *MockConnectionKeeperMockRecorder { + return m.recorder +} + +// GetConnection mocks base method. +func (m *MockConnectionKeeper) GetConnection(ctx types.Context, connectionID string) (types4.ConnectionEnd, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetConnection", ctx, connectionID) + ret0, _ := ret[0].(types4.ConnectionEnd) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetConnection indicates an expected call of GetConnection. +func (mr *MockConnectionKeeperMockRecorder) GetConnection(ctx, connectionID any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetConnection", reflect.TypeOf((*MockConnectionKeeper)(nil).GetConnection), ctx, connectionID) +} + +// MockChannelKeeper is a mock of ChannelKeeper interface. +type MockChannelKeeper struct { + ctrl *gomock.Controller + recorder *MockChannelKeeperMockRecorder +} + +// MockChannelKeeperMockRecorder is the mock recorder for MockChannelKeeper. +type MockChannelKeeperMockRecorder struct { + mock *MockChannelKeeper +} + +// NewMockChannelKeeper creates a new mock instance. 
+func NewMockChannelKeeper(ctrl *gomock.Controller) *MockChannelKeeper { + mock := &MockChannelKeeper{ctrl: ctrl} + mock.recorder = &MockChannelKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockChannelKeeper) EXPECT() *MockChannelKeeperMockRecorder { + return m.recorder +} + +// ChanCloseInit mocks base method. +func (m *MockChannelKeeper) ChanCloseInit(arg0 types.Context, arg1, arg2 string, arg3 *types1.Capability) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChanCloseInit", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// ChanCloseInit indicates an expected call of ChanCloseInit. +func (mr *MockChannelKeeperMockRecorder) ChanCloseInit(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChanCloseInit", reflect.TypeOf((*MockChannelKeeper)(nil).ChanCloseInit), arg0, arg1, arg2, arg3) +} + +// GetChannel mocks base method. +func (m *MockChannelKeeper) GetChannel(arg0 types.Context, arg1, arg2 string) (types5.Channel, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChannel", arg0, arg1, arg2) + ret0, _ := ret[0].(types5.Channel) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetChannel indicates an expected call of GetChannel. +func (mr *MockChannelKeeperMockRecorder) GetChannel(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChannel", reflect.TypeOf((*MockChannelKeeper)(nil).GetChannel), arg0, arg1, arg2) +} + +// GetChannelConnection mocks base method. +func (m *MockChannelKeeper) GetChannelConnection(arg0 types.Context, arg1, arg2 string) (string, exported.ConnectionI, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChannelConnection", arg0, arg1, arg2) + ret0, _ := ret[0].(string) + ret1, _ := ret[1].(exported.ConnectionI) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetChannelConnection indicates an expected call of GetChannelConnection. +func (mr *MockChannelKeeperMockRecorder) GetChannelConnection(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChannelConnection", reflect.TypeOf((*MockChannelKeeper)(nil).GetChannelConnection), arg0, arg1, arg2) +} + +// GetNextSequenceSend mocks base method. +func (m *MockChannelKeeper) GetNextSequenceSend(arg0 types.Context, arg1, arg2 string) (uint64, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetNextSequenceSend", arg0, arg1, arg2) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetNextSequenceSend indicates an expected call of GetNextSequenceSend. +func (mr *MockChannelKeeperMockRecorder) GetNextSequenceSend(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetNextSequenceSend", reflect.TypeOf((*MockChannelKeeper)(nil).GetNextSequenceSend), arg0, arg1, arg2) +} + +// SendPacket mocks base method. +func (m *MockChannelKeeper) SendPacket(arg0 types.Context, arg1 *types1.Capability, arg2, arg3 string, arg4 types3.Height, arg5 uint64, arg6 []byte) (uint64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendPacket", arg0, arg1, arg2, arg3, arg4, arg5, arg6) + ret0, _ := ret[0].(uint64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// SendPacket indicates an expected call of SendPacket. 
+func (mr *MockChannelKeeperMockRecorder) SendPacket(arg0, arg1, arg2, arg3, arg4, arg5, arg6 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendPacket", reflect.TypeOf((*MockChannelKeeper)(nil).SendPacket), arg0, arg1, arg2, arg3, arg4, arg5, arg6) +} + +// WriteAcknowledgement mocks base method. +func (m *MockChannelKeeper) WriteAcknowledgement(arg0 types.Context, arg1 *types1.Capability, arg2 exported.PacketI, arg3 exported.Acknowledgement) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "WriteAcknowledgement", arg0, arg1, arg2, arg3) + ret0, _ := ret[0].(error) + return ret0 +} + +// WriteAcknowledgement indicates an expected call of WriteAcknowledgement. +func (mr *MockChannelKeeperMockRecorder) WriteAcknowledgement(arg0, arg1, arg2, arg3 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "WriteAcknowledgement", reflect.TypeOf((*MockChannelKeeper)(nil).WriteAcknowledgement), arg0, arg1, arg2, arg3) +} + +// MockIBCCoreKeeper is a mock of IBCCoreKeeper interface. +type MockIBCCoreKeeper struct { + ctrl *gomock.Controller + recorder *MockIBCCoreKeeperMockRecorder +} + +// MockIBCCoreKeeperMockRecorder is the mock recorder for MockIBCCoreKeeper. +type MockIBCCoreKeeperMockRecorder struct { + mock *MockIBCCoreKeeper +} + +// NewMockIBCCoreKeeper creates a new mock instance. +func NewMockIBCCoreKeeper(ctrl *gomock.Controller) *MockIBCCoreKeeper { + mock := &MockIBCCoreKeeper{ctrl: ctrl} + mock.recorder = &MockIBCCoreKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIBCCoreKeeper) EXPECT() *MockIBCCoreKeeperMockRecorder { + return m.recorder +} + +// ChannelOpenInit mocks base method. +func (m *MockIBCCoreKeeper) ChannelOpenInit(arg0 context.Context, arg1 *types5.MsgChannelOpenInit) (*types5.MsgChannelOpenInitResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ChannelOpenInit", arg0, arg1) + ret0, _ := ret[0].(*types5.MsgChannelOpenInitResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ChannelOpenInit indicates an expected call of ChannelOpenInit. +func (mr *MockIBCCoreKeeperMockRecorder) ChannelOpenInit(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ChannelOpenInit", reflect.TypeOf((*MockIBCCoreKeeper)(nil).ChannelOpenInit), arg0, arg1) +} + +// MockAccountKeeper is a mock of AccountKeeper interface. +type MockAccountKeeper struct { + ctrl *gomock.Controller + recorder *MockAccountKeeperMockRecorder +} + +// MockAccountKeeperMockRecorder is the mock recorder for MockAccountKeeper. +type MockAccountKeeperMockRecorder struct { + mock *MockAccountKeeper +} + +// NewMockAccountKeeper creates a new mock instance. +func NewMockAccountKeeper(ctrl *gomock.Controller) *MockAccountKeeper { + mock := &MockAccountKeeper{ctrl: ctrl} + mock.recorder = &MockAccountKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAccountKeeper) EXPECT() *MockAccountKeeperMockRecorder { + return m.recorder +} + +// GetModuleAccount mocks base method. 
+func (m *MockAccountKeeper) GetModuleAccount(ctx types.Context, name string) types0.ModuleAccountI { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetModuleAccount", ctx, name) + ret0, _ := ret[0].(types0.ModuleAccountI) + return ret0 +} + +// GetModuleAccount indicates an expected call of GetModuleAccount. +func (mr *MockAccountKeeperMockRecorder) GetModuleAccount(ctx, name any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetModuleAccount", reflect.TypeOf((*MockAccountKeeper)(nil).GetModuleAccount), ctx, name) +} + +// MockBankKeeper is a mock of BankKeeper interface. +type MockBankKeeper struct { + ctrl *gomock.Controller + recorder *MockBankKeeperMockRecorder +} + +// MockBankKeeperMockRecorder is the mock recorder for MockBankKeeper. +type MockBankKeeperMockRecorder struct { + mock *MockBankKeeper +} + +// NewMockBankKeeper creates a new mock instance. +func NewMockBankKeeper(ctrl *gomock.Controller) *MockBankKeeper { + mock := &MockBankKeeper{ctrl: ctrl} + mock.recorder = &MockBankKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockBankKeeper) EXPECT() *MockBankKeeperMockRecorder { + return m.recorder +} + +// GetAllBalances mocks base method. +func (m *MockBankKeeper) GetAllBalances(ctx types.Context, addr types.AccAddress) types.Coins { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetAllBalances", ctx, addr) + ret0, _ := ret[0].(types.Coins) + return ret0 +} + +// GetAllBalances indicates an expected call of GetAllBalances. +func (mr *MockBankKeeperMockRecorder) GetAllBalances(ctx, addr any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAllBalances", reflect.TypeOf((*MockBankKeeper)(nil).GetAllBalances), ctx, addr) +} + +// GetBalance mocks base method. +func (m *MockBankKeeper) GetBalance(ctx types.Context, addr types.AccAddress, denom string) types.Coin { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetBalance", ctx, addr, denom) + ret0, _ := ret[0].(types.Coin) + return ret0 +} + +// GetBalance indicates an expected call of GetBalance. +func (mr *MockBankKeeperMockRecorder) GetBalance(ctx, addr, denom any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetBalance", reflect.TypeOf((*MockBankKeeper)(nil).GetBalance), ctx, addr, denom) +} + +// SendCoinsFromModuleToModule mocks base method. +func (m *MockBankKeeper) SendCoinsFromModuleToModule(ctx types.Context, senderModule, recipientModule string, amt types.Coins) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "SendCoinsFromModuleToModule", ctx, senderModule, recipientModule, amt) + ret0, _ := ret[0].(error) + return ret0 +} + +// SendCoinsFromModuleToModule indicates an expected call of SendCoinsFromModuleToModule. +func (mr *MockBankKeeperMockRecorder) SendCoinsFromModuleToModule(ctx, senderModule, recipientModule, amt any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SendCoinsFromModuleToModule", reflect.TypeOf((*MockBankKeeper)(nil).SendCoinsFromModuleToModule), ctx, senderModule, recipientModule, amt) +} + +// MockIBCTransferKeeper is a mock of IBCTransferKeeper interface. +type MockIBCTransferKeeper struct { + ctrl *gomock.Controller + recorder *MockIBCTransferKeeperMockRecorder +} + +// MockIBCTransferKeeperMockRecorder is the mock recorder for MockIBCTransferKeeper. 
+type MockIBCTransferKeeperMockRecorder struct { + mock *MockIBCTransferKeeper +} + +// NewMockIBCTransferKeeper creates a new mock instance. +func NewMockIBCTransferKeeper(ctrl *gomock.Controller) *MockIBCTransferKeeper { + mock := &MockIBCTransferKeeper{ctrl: ctrl} + mock.recorder = &MockIBCTransferKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockIBCTransferKeeper) EXPECT() *MockIBCTransferKeeperMockRecorder { + return m.recorder +} + +// Transfer mocks base method. +func (m *MockIBCTransferKeeper) Transfer(arg0 context.Context, arg1 *types2.MsgTransfer) (*types2.MsgTransferResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Transfer", arg0, arg1) + ret0, _ := ret[0].(*types2.MsgTransferResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Transfer indicates an expected call of Transfer. +func (mr *MockIBCTransferKeeperMockRecorder) Transfer(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Transfer", reflect.TypeOf((*MockIBCTransferKeeper)(nil).Transfer), arg0, arg1) +} diff --git a/x/appchain/common/types/keys.go b/x/appchain/common/types/keys.go new file mode 100644 index 000000000..de754c673 --- /dev/null +++ b/x/appchain/common/types/keys.go @@ -0,0 +1,16 @@ +package types + +const ( + // ModuleName is the name of the module + ModuleName = "appchain" + // Version is the current version of the module + Version = ModuleName + "-1" + // CoordinatorPortID is the default port id to which the coordinator module binds + CoordinatorPortID = "coordinator" + // SubscriberPortID is the default port id to which the subscriber module binds + SubscriberPortID = "subscriber" + // StoreKey defines the store key for the module (used in tests) + StoreKey = ModuleName + // MemStoreKey defines the in-memory store key (used in tests) + MemStoreKey = "mem_appchain" +) diff --git a/x/appchain/common/types/types.go b/x/appchain/common/types/types.go new file mode 100644 index 000000000..ae5d487aa --- /dev/null +++ b/x/appchain/common/types/types.go @@ -0,0 +1,32 @@ +package types + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" +) + +const maxLogSize = 1024 + +// NewResultAcknowledgementWithLog creates a result acknowledgement with a log message. +func NewResultAcknowledgementWithLog(ctx sdk.Context, res []byte) channeltypes.Acknowledgement { + if len(res) > maxLogSize { + ctx.Logger().Info( + "IBC ResultAcknowledgement constructed", + "res_size", len(res), + "res_preview", string(res[:maxLogSize]), + ) + } else { + ctx.Logger().Info( + "IBC ResultAcknowledgement constructed", + "res_size", len(res), + "res", string(res), + ) + } + return channeltypes.NewResultAcknowledgement(res) +} + +// NewErrorAcknowledgementWithLog creates an error acknowledgement with a log message. 
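A sketch of how the two logging acknowledgement constructors in types.go might be used from an IBC packet handler. The handler name, the empty-payload check, and the one-byte ack payload are assumptions for illustration; only the helpers themselves are part of this change:

package types

import (
	"errors"

	sdk "github.com/cosmos/cosmos-sdk/types"
	channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types"
)

// onRecvPacketSketch is a hypothetical receive-side handler showing the
// acknowledgement helpers in context; it is not part of this diff.
func onRecvPacketSketch(ctx sdk.Context, packet channeltypes.Packet) channeltypes.Acknowledgement {
	payload := packet.GetData()
	if len(payload) == 0 {
		// reject the packet, logging the reason on the receiving chain
		return NewErrorAcknowledgementWithLog(ctx, errors.New("empty packet data"))
	}
	// success path: the result bytes are logged, truncated beyond maxLogSize
	return NewResultAcknowledgementWithLog(ctx, []byte{0x01})
}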
+func NewErrorAcknowledgementWithLog(ctx sdk.Context, err error) channeltypes.Acknowledgement { + ctx.Logger().Error("IBC ErrorAcknowledgement constructed", "error", err) + return channeltypes.NewErrorAcknowledgement(err) +} diff --git a/x/appchain/common/types/utils.go b/x/appchain/common/types/utils.go new file mode 100644 index 000000000..ccb8bfef5 --- /dev/null +++ b/x/appchain/common/types/utils.go @@ -0,0 +1,42 @@ +package types + +import ( + "time" + + errorsmod "cosmossdk.io/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + host "github.com/cosmos/ibc-go/v7/modules/core/24-host" +) + +// SendIBCPacket sends an IBC packet with packetData over the source channelID and portID. +func SendIBCPacket( + ctx sdk.Context, + scopedKeeper ScopedKeeper, + channelKeeper ChannelKeeper, + sourceChannelID string, + sourcePortID string, + packetData []byte, + timeoutPeriod time.Duration, +) error { + _, ok := channelKeeper.GetChannel(ctx, sourcePortID, sourceChannelID) + if !ok { + return errorsmod.Wrapf(channeltypes.ErrChannelNotFound, "channel not found for channel ID: %s", sourceChannelID) + } + channelCap, ok := scopedKeeper.GetCapability(ctx, host.ChannelCapabilityPath(sourcePortID, sourceChannelID)) + if !ok { + return errorsmod.Wrap(channeltypes.ErrChannelCapabilityNotFound, "module does not own channel capability") + } + + _, err := channelKeeper.SendPacket(ctx, + channelCap, + sourcePortID, + sourceChannelID, + clienttypes.Height{}, // timeout height disabled + uint64(ctx.BlockTime().Add(timeoutPeriod).UnixNano()), // timeout timestamp + packetData, + ) + return err +} diff --git a/x/appchain/common/types/validator.go b/x/appchain/common/types/validator.go new file mode 100644 index 000000000..9f04b22a6 --- /dev/null +++ b/x/appchain/common/types/validator.go @@ -0,0 +1,46 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" + + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// NewSubscriberValidator creates a new SubscriberValidator instance. +func NewSubscriberValidator( + address []byte, power int64, pubKey cryptotypes.PubKey, +) (SubscriberValidator, error) { + pkAny, err := codectypes.NewAnyWithValue(pubKey) + if err != nil { + return SubscriberValidator{}, err + } + + return SubscriberValidator{ + ConsAddress: address, + Power: power, + Pubkey: pkAny, + }, nil +} + +// UnpackInterfaces implements UnpackInterfacesMessage.UnpackInterfaces. +// It is required to ensure that ConsPubKey below works. +func (sv SubscriberValidator) UnpackInterfaces(unpacker codectypes.AnyUnpacker) error { + var pk cryptotypes.PubKey + return unpacker.UnpackAny(sv.Pubkey, &pk) +} + +// ConsPubKey returns the validator PubKey as a cryptotypes.PubKey. 
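A sketch of how a caller might drive SendIBCPacket from utils.go above. The function, the JSON payload, the source channel ID, and the four-week timeout are assumptions; only SendIBCPacket and CoordinatorPortID are defined in this change:

package types

import (
	"time"

	sdk "github.com/cosmos/cosmos-sdk/types"
)

// sendHeartbeatSketch is a hypothetical caller of SendIBCPacket; in practice
// the data would be a marshalled packet struct rather than raw JSON.
func sendHeartbeatSketch(
	ctx sdk.Context,
	scopedKeeper ScopedKeeper,
	channelKeeper ChannelKeeper,
) error {
	data := []byte(`{"type":"heartbeat"}`)
	return SendIBCPacket(
		ctx,
		scopedKeeper,
		channelKeeper,
		"channel-0",       // assumed source channel ID
		CoordinatorPortID, // port bound by the coordinator module (keys.go)
		data,
		4*7*24*time.Hour, // assumed timeout period of roughly four weeks
	)
}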
+func (sv SubscriberValidator) ConsPubKey() (cryptotypes.PubKey, error) { + pk, ok := sv.Pubkey.GetCachedValue().(cryptotypes.PubKey) + if !ok { + return nil, errorsmod.Wrapf( + sdkerrors.ErrInvalidType, + "expecting cryptotypes.PubKey, got %T", + pk, + ) + } + + return pk, nil +} diff --git a/x/appchain/common/types/wire.go b/x/appchain/common/types/wire.go new file mode 100644 index 000000000..b93a59962 --- /dev/null +++ b/x/appchain/common/types/wire.go @@ -0,0 +1,95 @@ +package types + +import ( + abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +// WrappedSubscriberPacketData is a wrapper interface for SubscriberPacketData. It allows +// exposing the private interface defined in `wire.pb.go` to the outside world. +type WrappedSubscriberPacketData interface { + isSubscriberPacketData_Data +} + +// NewSubscriberPacketData creates a new SubscriberPacketData instance. +func NewSubscriberPacketData( + packetType SubscriberPacketDataType, packet isSubscriberPacketData_Data, +) SubscriberPacketData { + return SubscriberPacketData{ + Type: packetType, + Data: packet, + } +} + +// NewSlashPacketData creates a new SlashPacketData instance. +func NewSlashPacketData( + validator abci.Validator, + valUpdateID uint64, + infractionType stakingtypes.Infraction, +) *SlashPacketData { + return &SlashPacketData{ + Validator: validator, + ValsetUpdateID: valUpdateID, + Infraction: infractionType, + } +} + +// NewVscPacketData creates a new ValidatorSetChangePacketData instance. +func NewVscPacketData( + updates []abci.ValidatorUpdate, + valsetUpdateID uint64, + slashAcks [][]byte, +) ValidatorSetChangePacketData { + return ValidatorSetChangePacketData{ + ValidatorUpdates: updates, + ValsetUpdateID: valsetUpdateID, + SlashAcks: slashAcks, + } +} + +// NewVscMaturedPacketData creates a new VscMaturedPacketData instance. +func NewVscMaturedPacketData( + valsetUpdateID uint64, +) *VscMaturedPacketData { + return &VscMaturedPacketData{ + ValsetUpdateID: valsetUpdateID, + } +} + +// PacketAckResult is the acknowledgment result of a packet. +type PacketAckResult []byte + +var ( + // VscPacketHandledResult is the success acknowledgment result of a validator set change packet. + VscPacketHandledResult = PacketAckResult([]byte{byte(1)}) + // SlashPacketHandledResult is the success acknowledgment result of a slash packet. + SlashPacketHandledResult = PacketAckResult([]byte{byte(2)}) +) + +// Validate validates the SlashPacketData. It only performs stateless validation. +// (1) The address must be a valid consensus address. +// (2) The power must be positive. +// (3) The infraction type must be downtime. +func (vdt SlashPacketData) Validate() error { + // vdt.Validator.Address must be a consensus address + if err := sdk.VerifyAddressFormat(vdt.Validator.Address); err != nil { + return ErrInvalidPacketData.Wrapf("invalid validator: %s", err.Error()) + } + // vdt.Validator.Power must be positive + if vdt.Validator.Power <= 0 { + return ErrInvalidPacketData.Wrap("validator power must be positive") + } + // ValsetUpdateId can be zero for the first validator set, so we don't validate it here. + if vdt.Infraction != stakingtypes.Infraction_INFRACTION_DOWNTIME { + // only downtime infractions are supported at this time + return ErrInvalidPacketData.Wrapf("invalid infraction type: %s", vdt.Infraction.String()) + } + + return nil +} + +// Bytes returns the byte representation of the PacketAckResult.
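A sketch of how the constructors and stateless validation in wire.go compose on the subscriber side; the function name and the dummy consensus address/power arguments are assumptions, while the constructors, Validate, and the SlashPacket enum value (wire.pb.go) are part of this change:

package types

import (
	abci "github.com/cometbft/cometbft/abci/types"
	stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
)

// buildSlashPacketSketch is illustrative only; it builds a downtime slash
// packet, validates it, and wraps it in the oneof envelope for sending.
func buildSlashPacketSketch(
	consAddr []byte, power int64, valsetUpdateID uint64,
) (SubscriberPacketData, error) {
	slashData := NewSlashPacketData(
		abci.Validator{Address: consAddr, Power: power},
		valsetUpdateID,
		stakingtypes.Infraction_INFRACTION_DOWNTIME,
	)
	// stateless checks: valid consensus address, positive power, downtime infraction
	if err := slashData.Validate(); err != nil {
		return SubscriberPacketData{}, err
	}
	// wrap in the oneof so the coordinator can switch on the packet type
	return NewSubscriberPacketData(
		SlashPacket, // alias for SUBSCRIBER_PACKET_DATA_TYPE_SLASH in wire.pb.go
		&SubscriberPacketData_SlashPacketData{SlashPacketData: slashData},
	), nil
}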
+func (res PacketAckResult) Bytes() []byte { + return res +} diff --git a/x/appchain/common/types/wire.pb.go b/x/appchain/common/types/wire.pb.go new file mode 100644 index 000000000..b8f974a49 --- /dev/null +++ b/x/appchain/common/types/wire.pb.go @@ -0,0 +1,1485 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: exocore/appchain/common/v1/wire.proto + +package types + +import ( + fmt "fmt" + types "github.com/cometbft/cometbft/abci/types" + types1 "github.com/cosmos/cosmos-sdk/x/staking/types" + _ "github.com/cosmos/gogoproto/gogoproto" + proto "github.com/cosmos/gogoproto/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// SubscriberPacketData is the enum to identify the type of packet sent. +type SubscriberPacketDataType int32 + +const ( + // SUBSCRIBER_PACKET_DATA_TYPE_UNSPECIFIED is the default value + UnspecifiedPacket SubscriberPacketDataType = 0 + // SUBSCRIBER_PACKET_DATA_TYPE_SLASH is the type of packet sent when a subscriber + // chain wants to request the slashing of a validator on the coordinator chain. + SlashPacket SubscriberPacketDataType = 1 + // SUBSCRIBER_PACKET_DATA_TYPE_VSC_MATURED is the type of packet sent when a subscriber + // chain wants to indicate that a VSC has matured and unbondings associated with + // that VSC can now be released. + VscMaturedPacket SubscriberPacketDataType = 2 +) + +var SubscriberPacketDataType_name = map[int32]string{ + 0: "SUBSCRIBER_PACKET_DATA_TYPE_UNSPECIFIED", + 1: "SUBSCRIBER_PACKET_DATA_TYPE_SLASH", + 2: "SUBSCRIBER_PACKET_DATA_TYPE_VSC_MATURED", +} + +var SubscriberPacketDataType_value = map[string]int32{ + "SUBSCRIBER_PACKET_DATA_TYPE_UNSPECIFIED": 0, + "SUBSCRIBER_PACKET_DATA_TYPE_SLASH": 1, + "SUBSCRIBER_PACKET_DATA_TYPE_VSC_MATURED": 2, +} + +func (x SubscriberPacketDataType) String() string { + return proto.EnumName(SubscriberPacketDataType_name, int32(x)) +} + +func (SubscriberPacketDataType) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_646142158918d547, []int{0} +} + +type HandshakeMetadata struct { + // coordinator_fee_pool_addr is the address on the coordinator to which the + // subscriber chain will send the fees proportionally and periodically. 
+ CoordinatorFeePoolAddr string `protobuf:"bytes,1,opt,name=coordinator_fee_pool_addr,json=coordinatorFeePoolAddr,proto3" json:"coordinator_fee_pool_addr,omitempty"` + // version is the version of the appchain protocol + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` +} + +func (m *HandshakeMetadata) Reset() { *m = HandshakeMetadata{} } +func (m *HandshakeMetadata) String() string { return proto.CompactTextString(m) } +func (*HandshakeMetadata) ProtoMessage() {} +func (*HandshakeMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_646142158918d547, []int{0} +} +func (m *HandshakeMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HandshakeMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HandshakeMetadata.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HandshakeMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_HandshakeMetadata.Merge(m, src) +} +func (m *HandshakeMetadata) XXX_Size() int { + return m.Size() +} +func (m *HandshakeMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_HandshakeMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_HandshakeMetadata proto.InternalMessageInfo + +func (m *HandshakeMetadata) GetCoordinatorFeePoolAddr() string { + if m != nil { + return m.CoordinatorFeePoolAddr + } + return "" +} + +func (m *HandshakeMetadata) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +// SlashPacketData is sent from the subscriber chain to the coordinator chain +// to request the slashing of a validator as a result of an infraction committed +// on the subscriber chain. 
+type SlashPacketData struct { + // validator is the validator to be slashed + Validator types.Validator `protobuf:"bytes,1,opt,name=validator,proto3" json:"validator" yaml:"validator"` + // valset_update_id is the id of the validator set change during which + // the infraction was committed + ValsetUpdateID uint64 `protobuf:"varint,2,opt,name=valset_update_id,json=valsetUpdateId,proto3" json:"valset_update_id,omitempty"` + // infraction refers to the type of infraction committed + Infraction types1.Infraction `protobuf:"varint,3,opt,name=infraction,proto3,enum=cosmos.staking.v1beta1.Infraction" json:"infraction,omitempty"` +} + +func (m *SlashPacketData) Reset() { *m = SlashPacketData{} } +func (m *SlashPacketData) String() string { return proto.CompactTextString(m) } +func (*SlashPacketData) ProtoMessage() {} +func (*SlashPacketData) Descriptor() ([]byte, []int) { + return fileDescriptor_646142158918d547, []int{1} +} +func (m *SlashPacketData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SlashPacketData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SlashPacketData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SlashPacketData) XXX_Merge(src proto.Message) { + xxx_messageInfo_SlashPacketData.Merge(m, src) +} +func (m *SlashPacketData) XXX_Size() int { + return m.Size() +} +func (m *SlashPacketData) XXX_DiscardUnknown() { + xxx_messageInfo_SlashPacketData.DiscardUnknown(m) +} + +var xxx_messageInfo_SlashPacketData proto.InternalMessageInfo + +func (m *SlashPacketData) GetValidator() types.Validator { + if m != nil { + return m.Validator + } + return types.Validator{} +} + +func (m *SlashPacketData) GetValsetUpdateID() uint64 { + if m != nil { + return m.ValsetUpdateID + } + return 0 +} + +func (m *SlashPacketData) GetInfraction() types1.Infraction { + if m != nil { + return m.Infraction + } + return types1.Infraction_INFRACTION_UNSPECIFIED +} + +// VscMaturedPacketData is sent from the subscriber chain to the coordinator chain +// to indicate that a VSC has matured and unbondings associated with that VSC +// can now be released. +type VscMaturedPacketData struct { + // valset_update_id is the id of the validator set change to mature. 
+ ValsetUpdateID uint64 `protobuf:"varint,1,opt,name=valset_update_id,json=valsetUpdateId,proto3" json:"valset_update_id,omitempty"` +} + +func (m *VscMaturedPacketData) Reset() { *m = VscMaturedPacketData{} } +func (m *VscMaturedPacketData) String() string { return proto.CompactTextString(m) } +func (*VscMaturedPacketData) ProtoMessage() {} +func (*VscMaturedPacketData) Descriptor() ([]byte, []int) { + return fileDescriptor_646142158918d547, []int{2} +} +func (m *VscMaturedPacketData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VscMaturedPacketData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VscMaturedPacketData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VscMaturedPacketData) XXX_Merge(src proto.Message) { + xxx_messageInfo_VscMaturedPacketData.Merge(m, src) +} +func (m *VscMaturedPacketData) XXX_Size() int { + return m.Size() +} +func (m *VscMaturedPacketData) XXX_DiscardUnknown() { + xxx_messageInfo_VscMaturedPacketData.DiscardUnknown(m) +} + +var xxx_messageInfo_VscMaturedPacketData proto.InternalMessageInfo + +func (m *VscMaturedPacketData) GetValsetUpdateID() uint64 { + if m != nil { + return m.ValsetUpdateID + } + return 0 +} + +// SubscriberPacketData is a wrapped message that contains the type of packet +// and the data associated with that packet. +type SubscriberPacketData struct { + // type is the type of packet sent + Type SubscriberPacketDataType `protobuf:"varint,1,opt,name=type,proto3,enum=exocore.appchain.common.v1.SubscriberPacketDataType" json:"type,omitempty"` + // data is the data associated with the packet + // + // Types that are valid to be assigned to Data: + // *SubscriberPacketData_SlashPacketData + // *SubscriberPacketData_VscMaturedPacketData + Data isSubscriberPacketData_Data `protobuf_oneof:"data"` +} + +func (m *SubscriberPacketData) Reset() { *m = SubscriberPacketData{} } +func (m *SubscriberPacketData) String() string { return proto.CompactTextString(m) } +func (*SubscriberPacketData) ProtoMessage() {} +func (*SubscriberPacketData) Descriptor() ([]byte, []int) { + return fileDescriptor_646142158918d547, []int{3} +} +func (m *SubscriberPacketData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubscriberPacketData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SubscriberPacketData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SubscriberPacketData) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubscriberPacketData.Merge(m, src) +} +func (m *SubscriberPacketData) XXX_Size() int { + return m.Size() +} +func (m *SubscriberPacketData) XXX_DiscardUnknown() { + xxx_messageInfo_SubscriberPacketData.DiscardUnknown(m) +} + +var xxx_messageInfo_SubscriberPacketData proto.InternalMessageInfo + +type isSubscriberPacketData_Data interface { + isSubscriberPacketData_Data() + MarshalTo([]byte) (int, error) + Size() int +} + +type SubscriberPacketData_SlashPacketData struct { + SlashPacketData *SlashPacketData `protobuf:"bytes,2,opt,name=slash_packet_data,json=slashPacketData,proto3,oneof" json:"slash_packet_data,omitempty"` +} +type SubscriberPacketData_VscMaturedPacketData struct { + VscMaturedPacketData *VscMaturedPacketData 
`protobuf:"bytes,3,opt,name=vsc_matured_packet_data,json=vscMaturedPacketData,proto3,oneof" json:"vsc_matured_packet_data,omitempty"` +} + +func (*SubscriberPacketData_SlashPacketData) isSubscriberPacketData_Data() {} +func (*SubscriberPacketData_VscMaturedPacketData) isSubscriberPacketData_Data() {} + +func (m *SubscriberPacketData) GetData() isSubscriberPacketData_Data { + if m != nil { + return m.Data + } + return nil +} + +func (m *SubscriberPacketData) GetType() SubscriberPacketDataType { + if m != nil { + return m.Type + } + return UnspecifiedPacket +} + +func (m *SubscriberPacketData) GetSlashPacketData() *SlashPacketData { + if x, ok := m.GetData().(*SubscriberPacketData_SlashPacketData); ok { + return x.SlashPacketData + } + return nil +} + +func (m *SubscriberPacketData) GetVscMaturedPacketData() *VscMaturedPacketData { + if x, ok := m.GetData().(*SubscriberPacketData_VscMaturedPacketData); ok { + return x.VscMaturedPacketData + } + return nil +} + +// XXX_OneofWrappers is for the internal use of the proto package. +func (*SubscriberPacketData) XXX_OneofWrappers() []interface{} { + return []interface{}{ + (*SubscriberPacketData_SlashPacketData)(nil), + (*SubscriberPacketData_VscMaturedPacketData)(nil), + } +} + +// ValidatorSetChangePacketData is sent from the coordinator chain to the subscriber chain +// containing the new validator set and the id of the validator set change. +type ValidatorSetChangePacketData struct { + // validator_updates is the edits to the existing validator set + ValidatorUpdates []types.ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates" yaml:"validator_updates"` + // valset_update_id is the id of the validator set change + ValsetUpdateID uint64 `protobuf:"varint,2,opt,name=valset_update_id,json=valsetUpdateId,proto3" json:"valset_update_id,omitempty"` + // slash_acks is the list of consensus addresses slashed on the coordinator chain, + // in response to such requests from the subscriber chain. 
+ SlashAcks [][]byte `protobuf:"bytes,3,rep,name=slash_acks,json=slashAcks,proto3" json:"slash_acks,omitempty"` +} + +func (m *ValidatorSetChangePacketData) Reset() { *m = ValidatorSetChangePacketData{} } +func (m *ValidatorSetChangePacketData) String() string { return proto.CompactTextString(m) } +func (*ValidatorSetChangePacketData) ProtoMessage() {} +func (*ValidatorSetChangePacketData) Descriptor() ([]byte, []int) { + return fileDescriptor_646142158918d547, []int{4} +} +func (m *ValidatorSetChangePacketData) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorSetChangePacketData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorSetChangePacketData.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorSetChangePacketData) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorSetChangePacketData.Merge(m, src) +} +func (m *ValidatorSetChangePacketData) XXX_Size() int { + return m.Size() +} +func (m *ValidatorSetChangePacketData) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorSetChangePacketData.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorSetChangePacketData proto.InternalMessageInfo + +func (m *ValidatorSetChangePacketData) GetValidatorUpdates() []types.ValidatorUpdate { + if m != nil { + return m.ValidatorUpdates + } + return nil +} + +func (m *ValidatorSetChangePacketData) GetValsetUpdateID() uint64 { + if m != nil { + return m.ValsetUpdateID + } + return 0 +} + +func (m *ValidatorSetChangePacketData) GetSlashAcks() [][]byte { + if m != nil { + return m.SlashAcks + } + return nil +} + +func init() { + proto.RegisterEnum("exocore.appchain.common.v1.SubscriberPacketDataType", SubscriberPacketDataType_name, SubscriberPacketDataType_value) + proto.RegisterType((*HandshakeMetadata)(nil), "exocore.appchain.common.v1.HandshakeMetadata") + proto.RegisterType((*SlashPacketData)(nil), "exocore.appchain.common.v1.SlashPacketData") + proto.RegisterType((*VscMaturedPacketData)(nil), "exocore.appchain.common.v1.VscMaturedPacketData") + proto.RegisterType((*SubscriberPacketData)(nil), "exocore.appchain.common.v1.SubscriberPacketData") + proto.RegisterType((*ValidatorSetChangePacketData)(nil), "exocore.appchain.common.v1.ValidatorSetChangePacketData") +} + +func init() { + proto.RegisterFile("exocore/appchain/common/v1/wire.proto", fileDescriptor_646142158918d547) +} + +var fileDescriptor_646142158918d547 = []byte{ + // 734 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xc1, 0x6e, 0xd3, 0x4a, + 0x14, 0xb5, 0x9b, 0xa8, 0x4f, 0x9d, 0x3c, 0xa5, 0x89, 0x95, 0xf7, 0x30, 0x06, 0x52, 0x63, 0x81, + 0x88, 0x40, 0xb2, 0x49, 0x40, 0x48, 0x45, 0x6c, 0xec, 0x24, 0x55, 0x22, 0x68, 0x15, 0xd9, 0x49, + 0xa4, 0xb2, 0xb1, 0x26, 0xf6, 0x34, 0xb1, 0x92, 0x78, 0x2c, 0xcf, 0xc4, 0x6d, 0xff, 0x00, 0x75, + 0xc5, 0x0f, 0x74, 0xc5, 0x9e, 0xef, 0xe8, 0xb2, 0x4b, 0x56, 0x05, 0xa5, 0x0b, 0x36, 0xac, 0xf8, + 0x02, 0x64, 0x3b, 0x4e, 0xd3, 0x92, 0x46, 0x48, 0xec, 0x66, 0xe6, 0xde, 0x7b, 0xee, 0x9d, 0x73, + 0x8e, 0x2e, 0x78, 0x8c, 0x8e, 0xb0, 0x85, 0x7d, 0xa4, 0x40, 0xcf, 0xb3, 0x06, 0xd0, 0x71, 0x15, + 0x0b, 0x8f, 0xc7, 0xd8, 0x55, 0x82, 0xb2, 0x72, 0xe8, 0xf8, 0x48, 0xf6, 0x7c, 0x4c, 0x31, 0x27, + 0xcc, 0xd2, 0xe4, 0x24, 0x4d, 0x8e, 0xd3, 0xe4, 0xa0, 0x2c, 0x3c, 0xb2, 0x30, 0x19, 0x63, 0xa2, + 0x10, 0x0a, 0x87, 0x8e, 0xdb, 0x57, 0x82, 
0x72, 0x0f, 0x51, 0x58, 0x4e, 0xee, 0x31, 0x82, 0x50, + 0xe8, 0xe3, 0x3e, 0x8e, 0x8e, 0x4a, 0x78, 0x9a, 0xbd, 0xde, 0xa3, 0xc8, 0xb5, 0x91, 0x3f, 0x76, + 0x5c, 0xaa, 0xc0, 0x9e, 0xe5, 0x28, 0xf4, 0xd8, 0x43, 0x24, 0x0e, 0x4a, 0x03, 0x90, 0x6f, 0x40, + 0xd7, 0x26, 0x03, 0x38, 0x44, 0xbb, 0x88, 0x42, 0x1b, 0x52, 0xc8, 0x6d, 0x83, 0xbb, 0x16, 0xc6, + 0xbe, 0xed, 0xb8, 0x90, 0x62, 0xdf, 0x3c, 0x40, 0xc8, 0xf4, 0x30, 0x1e, 0x99, 0xd0, 0xb6, 0x7d, + 0x9e, 0x15, 0xd9, 0xd2, 0x86, 0xfe, 0xff, 0x42, 0xc2, 0x0e, 0x42, 0x2d, 0x8c, 0x47, 0xaa, 0x6d, + 0xfb, 0x1c, 0x0f, 0xfe, 0x09, 0x90, 0x4f, 0x1c, 0xec, 0xf2, 0x6b, 0x51, 0x62, 0x72, 0x95, 0xbe, + 0xb3, 0x60, 0xd3, 0x18, 0x41, 0x32, 0x68, 0x41, 0x6b, 0x88, 0x68, 0x2d, 0x6c, 0xa4, 0x83, 0x8d, + 0x00, 0x8e, 0x1c, 0x3b, 0x44, 0x89, 0x80, 0x33, 0x15, 0x41, 0xbe, 0x1a, 0x57, 0x0e, 0xc7, 0x95, + 0xbb, 0x49, 0x86, 0xc6, 0x9f, 0x5d, 0x6c, 0x31, 0x3f, 0x2f, 0xb6, 0x72, 0xc7, 0x70, 0x3c, 0x7a, + 0x2d, 0xcd, 0x4b, 0x25, 0xfd, 0x0a, 0x86, 0x7b, 0x03, 0x72, 0x01, 0x1c, 0x11, 0x44, 0xcd, 0x89, + 0x67, 0x43, 0x8a, 0x4c, 0xc7, 0x8e, 0x46, 0x49, 0x6b, 0xdc, 0xf4, 0x62, 0x2b, 0xdb, 0x8d, 0x62, + 0x9d, 0x28, 0xd4, 0xac, 0xe9, 0xd9, 0x60, 0xf1, 0x6e, 0x73, 0x1a, 0x00, 0x8e, 0x7b, 0xe0, 0x43, + 0x8b, 0x86, 0x5f, 0x48, 0x89, 0x6c, 0x29, 0x5b, 0x91, 0xe4, 0x98, 0x7d, 0x39, 0x61, 0x7b, 0xc6, + 0xbe, 0xdc, 0x9c, 0x67, 0xea, 0x0b, 0x55, 0x52, 0x1b, 0x14, 0xba, 0xc4, 0xda, 0x85, 0x74, 0xe2, + 0x23, 0x7b, 0xe1, 0xb7, 0xcb, 0x26, 0x63, 0xff, 0x74, 0x32, 0xe9, 0xf3, 0x1a, 0x28, 0x18, 0x93, + 0x1e, 0xb1, 0x7c, 0xa7, 0x87, 0xfc, 0x05, 0xd8, 0x06, 0x48, 0x87, 0x8a, 0x46, 0x50, 0xd9, 0xca, + 0x4b, 0xf9, 0x76, 0x1b, 0xc9, 0xcb, 0xea, 0xdb, 0xc7, 0x1e, 0xd2, 0x23, 0x04, 0x6e, 0x1f, 0xe4, + 0x49, 0xa8, 0x90, 0xe9, 0x45, 0x51, 0x33, 0x34, 0x43, 0xc4, 0x5d, 0xa6, 0xf2, 0x6c, 0x25, 0xec, + 0x75, 0x59, 0x1b, 0x8c, 0xbe, 0x49, 0x6e, 0x28, 0xed, 0x80, 0x3b, 0x01, 0xb1, 0xcc, 0x71, 0x4c, + 0xca, 0xb5, 0x06, 0xa9, 0xa8, 0xc1, 0xf3, 0x55, 0x0d, 0x96, 0xd1, 0xd9, 0x60, 0xf4, 0x42, 0xb0, + 0xe4, 0x5d, 0x5b, 0x07, 0xe9, 0x10, 0x57, 0xfa, 0xc1, 0x82, 0xfb, 0x73, 0xef, 0x18, 0x88, 0x56, + 0x07, 0xd0, 0xed, 0xa3, 0x85, 0x99, 0x30, 0xc8, 0xcf, 0x6d, 0x33, 0x93, 0x84, 0xf0, 0xac, 0x98, + 0x2a, 0x65, 0x2a, 0xe2, 0xed, 0x2e, 0x8c, 0x05, 0xd1, 0xc4, 0x99, 0x17, 0xf9, 0x1b, 0x5e, 0x4c, + 0x80, 0x24, 0x3d, 0x17, 0x5c, 0x2f, 0x21, 0x7f, 0x69, 0xcd, 0x07, 0x00, 0xc4, 0xea, 0x40, 0x6b, + 0x48, 0xf8, 0x94, 0x98, 0x2a, 0xfd, 0xab, 0x6f, 0x44, 0x2f, 0xaa, 0x35, 0x24, 0x4f, 0xbf, 0xb2, + 0x80, 0xbf, 0x4d, 0x5f, 0x4e, 0x03, 0x4f, 0x8c, 0x8e, 0x66, 0x54, 0xf5, 0xa6, 0x56, 0xd7, 0xcd, + 0x96, 0x5a, 0x7d, 0x5b, 0x6f, 0x9b, 0x35, 0xb5, 0xad, 0x9a, 0xed, 0xfd, 0x56, 0xdd, 0xec, 0xec, + 0x19, 0xad, 0x7a, 0xb5, 0xb9, 0xd3, 0xac, 0xd7, 0x72, 0x8c, 0xf0, 0xdf, 0xc9, 0xa9, 0x98, 0xef, + 0xb8, 0xc4, 0x43, 0x96, 0x73, 0xe0, 0x24, 0xdc, 0x72, 0xaf, 0xc0, 0xc3, 0x55, 0x18, 0xc6, 0x3b, + 0xd5, 0x68, 0xe4, 0x58, 0x61, 0xf3, 0xe4, 0x54, 0xcc, 0x2c, 0x38, 0x82, 0x53, 0x57, 0xf7, 0xee, + 0x1a, 0x55, 0x73, 0x57, 0x6d, 0x77, 0xf4, 0x7a, 0x2d, 0xb7, 0x26, 0x14, 0x4e, 0x4e, 0xc5, 0xdc, + 0x4d, 0xb9, 0x85, 0xf4, 0x87, 0x4f, 0x45, 0x46, 0x33, 0xce, 0xa6, 0x45, 0xf6, 0x7c, 0x5a, 0x64, + 0xbf, 0x4d, 0x8b, 0xec, 0xc7, 0xcb, 0x22, 0x73, 0x7e, 0x59, 0x64, 0xbe, 0x5c, 0x16, 0x99, 0xf7, + 0xdb, 0x7d, 0x87, 0x0e, 0x26, 0xbd, 0xd0, 0x31, 0x4a, 0x3d, 0xb6, 0xd1, 0x1e, 0xa2, 0x87, 0xd8, + 0x1f, 0x2a, 0xc9, 0xee, 0x3d, 0xfa, 0x6d, 0xfb, 0x46, 0x6b, 0xb0, 0xb7, 0x1e, 0xed, 0xc1, 0x17, + 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x55, 0x9e, 0x0c, 0x42, 0xa5, 
0x05, 0x00, 0x00, +} + +func (m *HandshakeMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HandshakeMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HandshakeMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintWire(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + } + if len(m.CoordinatorFeePoolAddr) > 0 { + i -= len(m.CoordinatorFeePoolAddr) + copy(dAtA[i:], m.CoordinatorFeePoolAddr) + i = encodeVarintWire(dAtA, i, uint64(len(m.CoordinatorFeePoolAddr))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SlashPacketData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SlashPacketData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SlashPacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Infraction != 0 { + i = encodeVarintWire(dAtA, i, uint64(m.Infraction)) + i-- + dAtA[i] = 0x18 + } + if m.ValsetUpdateID != 0 { + i = encodeVarintWire(dAtA, i, uint64(m.ValsetUpdateID)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.Validator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWire(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VscMaturedPacketData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VscMaturedPacketData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VscMaturedPacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ValsetUpdateID != 0 { + i = encodeVarintWire(dAtA, i, uint64(m.ValsetUpdateID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SubscriberPacketData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubscriberPacketData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubscriberPacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Data != nil { + { + size := m.Data.Size() + i -= size + if _, err := m.Data.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + } + } + if m.Type != 0 { + i = encodeVarintWire(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *SubscriberPacketData_SlashPacketData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubscriberPacketData_SlashPacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.SlashPacketData != nil { + 
{ + size, err := m.SlashPacketData.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWire(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} +func (m *SubscriberPacketData_VscMaturedPacketData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubscriberPacketData_VscMaturedPacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.VscMaturedPacketData != nil { + { + size, err := m.VscMaturedPacketData.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWire(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + return len(dAtA) - i, nil +} +func (m *ValidatorSetChangePacketData) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorSetChangePacketData) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorSetChangePacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.SlashAcks) > 0 { + for iNdEx := len(m.SlashAcks) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SlashAcks[iNdEx]) + copy(dAtA[i:], m.SlashAcks[iNdEx]) + i = encodeVarintWire(dAtA, i, uint64(len(m.SlashAcks[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.ValsetUpdateID != 0 { + i = encodeVarintWire(dAtA, i, uint64(m.ValsetUpdateID)) + i-- + dAtA[i] = 0x10 + } + if len(m.ValidatorUpdates) > 0 { + for iNdEx := len(m.ValidatorUpdates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValidatorUpdates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintWire(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintWire(dAtA []byte, offset int, v uint64) int { + offset -= sovWire(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *HandshakeMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CoordinatorFeePoolAddr) + if l > 0 { + n += 1 + l + sovWire(uint64(l)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovWire(uint64(l)) + } + return n +} + +func (m *SlashPacketData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Validator.Size() + n += 1 + l + sovWire(uint64(l)) + if m.ValsetUpdateID != 0 { + n += 1 + sovWire(uint64(m.ValsetUpdateID)) + } + if m.Infraction != 0 { + n += 1 + sovWire(uint64(m.Infraction)) + } + return n +} + +func (m *VscMaturedPacketData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValsetUpdateID != 0 { + n += 1 + sovWire(uint64(m.ValsetUpdateID)) + } + return n +} + +func (m *SubscriberPacketData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Type != 0 { + n += 1 + sovWire(uint64(m.Type)) + } + if m.Data != nil { + n += m.Data.Size() + } + return n +} + +func (m *SubscriberPacketData_SlashPacketData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SlashPacketData != nil { + l = m.SlashPacketData.Size() + n += 1 + l + sovWire(uint64(l)) + } + return n +} +func (m *SubscriberPacketData_VscMaturedPacketData) Size() (n int) { + if m == nil { + return 
0 + } + var l int + _ = l + if m.VscMaturedPacketData != nil { + l = m.VscMaturedPacketData.Size() + n += 1 + l + sovWire(uint64(l)) + } + return n +} +func (m *ValidatorSetChangePacketData) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ValidatorUpdates) > 0 { + for _, e := range m.ValidatorUpdates { + l = e.Size() + n += 1 + l + sovWire(uint64(l)) + } + } + if m.ValsetUpdateID != 0 { + n += 1 + sovWire(uint64(m.ValsetUpdateID)) + } + if len(m.SlashAcks) > 0 { + for _, b := range m.SlashAcks { + l = len(b) + n += 1 + l + sovWire(uint64(l)) + } + } + return n +} + +func sovWire(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozWire(x uint64) (n int) { + return sovWire(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *HandshakeMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HandshakeMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HandshakeMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CoordinatorFeePoolAddr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWire + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CoordinatorFeePoolAddr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthWire + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWire(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWire + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SlashPacketData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SlashPacketData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SlashPacketData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Validator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWire + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Validator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValsetUpdateID", wireType) + } + m.ValsetUpdateID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValsetUpdateID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Infraction", wireType) + } + m.Infraction = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Infraction |= types1.Infraction(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWire(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWire + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VscMaturedPacketData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VscMaturedPacketData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VscMaturedPacketData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValsetUpdateID", wireType) + } + m.ValsetUpdateID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValsetUpdateID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWire(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWire + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*SubscriberPacketData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubscriberPacketData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubscriberPacketData: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= SubscriberPacketDataType(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SlashPacketData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWire + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SlashPacketData{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &SubscriberPacketData_SlashPacketData{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VscMaturedPacketData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWire + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &VscMaturedPacketData{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &SubscriberPacketData_VscMaturedPacketData{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWire(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWire + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatorSetChangePacketData) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorSetChangePacketData: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorSetChangePacketData: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorUpdates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthWire + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorUpdates = append(m.ValidatorUpdates, types.ValidatorUpdate{}) + if err := m.ValidatorUpdates[len(m.ValidatorUpdates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValsetUpdateID", wireType) + } + m.ValsetUpdateID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValsetUpdateID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SlashAcks", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthWire + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SlashAcks = append(m.SlashAcks, make([]byte, postIndex-iNdEx)) + copy(m.SlashAcks[len(m.SlashAcks)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWire(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthWire + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWire(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthWire + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupWire + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthWire + } + 
if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthWire = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWire = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupWire = fmt.Errorf("proto: unexpected end of group") +) diff --git a/x/appchain/coordinator/keeper/connection.go b/x/appchain/coordinator/keeper/connection.go new file mode 100644 index 000000000..0eba683e6 --- /dev/null +++ b/x/appchain/coordinator/keeper/connection.go @@ -0,0 +1,170 @@ +package keeper + +import ( + "encoding/binary" + + errorsmod "cosmossdk.io/errors" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + subscribertypes "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + conntypes "github.com/cosmos/ibc-go/v7/modules/core/03-connection/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + ibchost "github.com/cosmos/ibc-go/v7/modules/core/exported" + ibctmtypes "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint" +) + +// VerifySubscriberChain verifies the chain trying to connect on the channel handshake. +// The verification includes the number of connection hops, the presence of a light client, +// as well as the channel's presence. +func (k Keeper) VerifySubscriberChain( + ctx sdk.Context, + _ string, + connectionHops []string, +) error { + if len(connectionHops) != 1 { + return errorsmod.Wrap( + channeltypes.ErrTooManyConnectionHops, + "must have direct connection to coordinator chain", + ) + } + connectionID := connectionHops[0] + clientID, tmClient, err := k.getUnderlyingClient(ctx, connectionID) + if err != nil { + return err + } + storedClientID, found := k.GetClientForChain(ctx, tmClient.ChainId) + if !found { + return errorsmod.Wrapf( + commontypes.ErrClientNotFound, + "cannot find client for subscriber chain %s", + tmClient.ChainId, + ) + } + if storedClientID != clientID { + return errorsmod.Wrapf( + types.ErrInvalidSubscriberClient, + "channel must be built on top of client. expected %s, got %s", + storedClientID, clientID, + ) + } + + // Verify that there isn't already a stored channel + if prevChannel, ok := k.GetChannelForChain(ctx, tmClient.ChainId); ok { + return errorsmod.Wrapf( + commontypes.ErrDuplicateChannel, + "channel with ID: %s already created for subscriber chain %s", + prevChannel, tmClient.ChainId, + ) + } + return nil +} + +// getUnderlyingClient gets the client state of the subscriber chain, +// as deployed on the coordinator chain. +func (k Keeper) getUnderlyingClient(ctx sdk.Context, connectionID string) ( + clientID string, tmClient *ibctmtypes.ClientState, err error, +) { + conn, ok := k.connectionKeeper.GetConnection(ctx, connectionID) + if !ok { + return "", nil, errorsmod.Wrapf(conntypes.ErrConnectionNotFound, + "connection not found for connection ID: %s", connectionID) + } + clientID = conn.ClientId + clientState, ok := k.clientKeeper.GetClientState(ctx, clientID) + if !ok { + return "", nil, errorsmod.Wrapf(clienttypes.ErrClientNotFound, + "client not found for client ID: %s", clientID) + } + tmClient, ok = clientState.(*ibctmtypes.ClientState) + if !ok { + return "", nil, errorsmod.Wrapf( + clienttypes.ErrInvalidClientType, + "invalid client type. 
expected %s, got %s", + ibchost.Tendermint, + clientState.ClientType(), + ) + } + return clientID, tmClient, nil +} + +// SetSubscriberChain sets the subscriber chain for the given channel ID. +// It is called when the connection handshake is complete. +func (k Keeper) SetSubscriberChain(ctx sdk.Context, channelID string) error { + channel, ok := k.channelKeeper.GetChannel(ctx, commontypes.CoordinatorPortID, channelID) + if !ok { + return errorsmod.Wrapf( + channeltypes.ErrChannelNotFound, + "channel not found for channel ID: %s", channelID, + ) + } + if len(channel.ConnectionHops) != 1 { + return errorsmod.Wrap( + channeltypes.ErrTooManyConnectionHops, + "must have direct connection to subscriber chain", + ) + } + connectionID := channel.ConnectionHops[0] + clientID, tmClient, err := k.getUnderlyingClient(ctx, connectionID) + if err != nil { + return err + } + // Verify that there isn't already a channel for the subscriber chain + chainID := tmClient.ChainId + if prevChannelID, ok := k.GetChannelForChain(ctx, chainID); ok { + return errorsmod.Wrapf( + commontypes.ErrDuplicateChannel, + "channel with Id: %s already created for subscriber chain %s", + prevChannelID, chainID, + ) + } + + // the channel is established: + // - set channel mappings + k.SetChannelForChain(ctx, chainID, channelID) + k.SetChainForChannel(ctx, channelID, chainID) + // - set current block height for the subscriber chain initialization + k.SetInitChainHeight(ctx, chainID, uint64(ctx.BlockHeight())) + // remove init timeout timestamp + timeout, exists := k.GetChainInitTimeout(ctx, chainID) + if exists { + k.DeleteChainInitTimeout(ctx, chainID) + k.RemoveChainFromInitTimeout(ctx, timeout, chainID) + } else { + k.Logger(ctx).Error("timeout not found for chain", "chainID", chainID) + } + + // emit event on successful addition + ctx.EventManager().EmitEvent( + sdk.NewEvent( + commontypes.EventTypeChannelEstablished, + sdk.NewAttribute(sdk.AttributeKeyModule, subscribertypes.ModuleName), + sdk.NewAttribute(commontypes.AttributeChainID, chainID), + sdk.NewAttribute(conntypes.AttributeKeyClientID, clientID), + sdk.NewAttribute(channeltypes.AttributeKeyChannelID, channelID), + sdk.NewAttribute(conntypes.AttributeKeyConnectionID, connectionID), + ), + ) + return nil +} + +// SetInitChainHeight sets the Exocore block height when the given app chain was initiated +func (k Keeper) SetInitChainHeight(ctx sdk.Context, chainID string, height uint64) { + store := ctx.KVStore(k.storeKey) + heightBytes := make([]byte, 8) + binary.BigEndian.PutUint64(heightBytes, height) + + store.Set(types.InitChainHeightKey(chainID), heightBytes) +} + +// GetInitChainHeight returns the Exocore block height when the given app chain was initiated +func (k Keeper) GetInitChainHeight(ctx sdk.Context, chainID string) (uint64, bool) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(types.InitChainHeightKey(chainID)) + if bz == nil { + return 0, false + } + + return binary.BigEndian.Uint64(bz), true +} diff --git a/x/appchain/coordinator/keeper/connection_test.go b/x/appchain/coordinator/keeper/connection_test.go new file mode 100644 index 000000000..e1d0a96c7 --- /dev/null +++ b/x/appchain/coordinator/keeper/connection_test.go @@ -0,0 +1,139 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + connectiontypes "github.com/cosmos/ibc-go/v7/modules/core/03-connection/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + ibctmtypes 
"github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint" + + testutil "github.com/ExocoreNetwork/exocore/testutil/keeper" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" +) + +func TestKeeper_VerifySubscriberChain(t *testing.T) { + k, ctx, mocks := testutil.NewCoordinatorKeeper(t) + + tests := []struct { + name string + connectionHops []string + setup func() + wantErr bool + errorType error + }{ + { + name: "success", + connectionHops: []string{"connection-0"}, + setup: func() { + connection := connectiontypes.ConnectionEnd{ClientId: "07-tendermint-0"} + mocks.ConnectionKeeper.EXPECT().GetConnection(ctx, "connection-0").Return(connection, true) + + clientState := ibctmtypes.ClientState{ChainId: "test-chain-1"} + mocks.ClientKeeper.EXPECT().GetClientState(ctx, "07-tendermint-0").Return(&clientState, true) + + // Instead of mocking Keeper.GetClientForChain, we'll set up the state + k.SetClientForChain(ctx, "test-chain-1", "07-tendermint-0") + }, + wantErr: false, + }, + // ... other test cases remain the same + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + tt.setup() + } + + err := k.VerifySubscriberChain(ctx, "", tt.connectionHops) + + if tt.wantErr { + assert.Error(t, err) + assert.ErrorIs(t, err, tt.errorType) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestKeeper_SetSubscriberChain(t *testing.T) { + k, ctx, mocks := testutil.NewCoordinatorKeeper(t) + + tests := []struct { + name string + channelID string + setup func() + wantErr bool + errorType error + }{ + { + name: "success", + channelID: "channel-0", + setup: func() { + channel := channeltypes.Channel{ConnectionHops: []string{"connection-0"}} + mocks.ChannelKeeper.EXPECT().GetChannel(ctx, commontypes.CoordinatorPortID, "channel-0").Return(channel, true) + + connection := connectiontypes.ConnectionEnd{ClientId: "07-tendermint-0"} + mocks.ConnectionKeeper.EXPECT().GetConnection(ctx, "connection-0").Return(connection, true) + + clientState := ibctmtypes.ClientState{ChainId: "test-chain-1"} + mocks.ClientKeeper.EXPECT().GetClientState(ctx, "07-tendermint-0").Return(&clientState, true) + + // Instead of mocking Keeper methods, we'll check the state after calling SetSubscriberChain + }, + wantErr: false, + }, + // ... 
other test cases remain the same + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + tt.setup() + } + + err := k.SetSubscriberChain(ctx, tt.channelID) + + if tt.wantErr { + assert.Error(t, err) + assert.ErrorIs(t, err, tt.errorType) + } else { + assert.NoError(t, err) + // Check the state after SetSubscriberChain + channelID, found := k.GetChannelForChain(ctx, "test-chain-1") + assert.True(t, found) + assert.Equal(t, tt.channelID, channelID) + + chainID, found := k.GetChainForChannel(ctx, tt.channelID) + assert.True(t, found) + assert.Equal(t, "test-chain-1", chainID) + + height, found := k.GetInitChainHeight(ctx, "test-chain-1") + assert.True(t, found) + assert.Equal(t, uint64(ctx.BlockHeight()), height) + } + }) + } +} + +func TestKeeper_InitChainHeight(t *testing.T) { + k, ctx, _ := testutil.NewCoordinatorKeeper(t) + + chainID := "test-chain-1" + height := uint64(100) + + // Test SetInitChainHeight and GetInitChainHeight + k.SetInitChainHeight(ctx, chainID, height) + + gotHeight, found := k.GetInitChainHeight(ctx, chainID) + require.True(t, found) + assert.Equal(t, height, gotHeight) + + // Test GetInitChainHeight for non-existent chain + _, found = k.GetInitChainHeight(ctx, "non-existent-chain") + assert.False(t, found) +} diff --git a/x/appchain/coordinator/keeper/distribution.go b/x/appchain/coordinator/keeper/distribution.go new file mode 100644 index 000000000..3e58cff60 --- /dev/null +++ b/x/appchain/coordinator/keeper/distribution.go @@ -0,0 +1,18 @@ +package keeper + +import ( + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// GetSubscriberRewardsPoolAddressStr gets the subscriber rewards pool address string. +// It is the bech32 string corresponding to a hardcoded module account. 
+func (k Keeper) GetSubscriberRewardsPoolAddressStr(ctx sdk.Context) string { + return k.accountKeeper.GetModuleAccount( + ctx, types.SubscriberRewardsPool, + ).GetAddress().String() +} + +// TODO: distribution implementation +// (1) validate that the subscriber has sent rewards to the pool and if not penalize them +// (2) forward rewards to the fee collector account diff --git a/x/appchain/coordinator/keeper/genesis.go b/x/appchain/coordinator/keeper/genesis.go index 8411f4023..eb3264eb6 100644 --- a/x/appchain/coordinator/keeper/genesis.go +++ b/x/appchain/coordinator/keeper/genesis.go @@ -8,10 +8,12 @@ import ( func (k Keeper) InitGenesis(ctx sdk.Context, gs types.GenesisState) []abci.ValidatorUpdate { k.SetParams(ctx, gs.Params) + // TODO: initialize any other genesis state return []abci.ValidatorUpdate{} } func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { + // TODO: export any other genesis state return &types.GenesisState{ Params: k.GetParams(ctx), } diff --git a/x/appchain/coordinator/keeper/grpc_query_test.go b/x/appchain/coordinator/keeper/grpc_query_test.go new file mode 100644 index 000000000..aac6cde68 --- /dev/null +++ b/x/appchain/coordinator/keeper/grpc_query_test.go @@ -0,0 +1,69 @@ +package keeper_test + +import ( + "testing" + + testutil "github.com/ExocoreNetwork/exocore/testutil/keeper" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +func TestQueryParams(t *testing.T) { + keeper, ctx, _ := testutil.NewCoordinatorKeeper(t) + + t.Run("Valid request", func(t *testing.T) { + expectedParams := types.DefaultParams() + keeper.SetParams(ctx, expectedParams) + + response, err := keeper.QueryParams(sdk.WrapSDKContext(ctx), &types.QueryParamsRequest{}) + require.NoError(t, err) + assert.Equal(t, expectedParams, response.Params) + }) + + t.Run("Nil request", func(t *testing.T) { + response, err := keeper.QueryParams(sdk.WrapSDKContext(ctx), nil) + require.Error(t, err) + assert.Nil(t, response) + assert.Equal(t, codes.InvalidArgument, status.Code(err)) + }) +} + +func TestQuerySubscriberGenesis(t *testing.T) { + keeper, ctx, _ := testutil.NewCoordinatorKeeper(t) + + t.Run("Existing subscriber genesis", func(t *testing.T) { + expectedGenesis := commontypes.SubscriberGenesisState{ + Params: commontypes.SubscriberParams{}, + Coordinator: commontypes.CoordinatorInfo{}, + // Add other fields as necessary + } + keeper.SetSubscriberGenesis(ctx, "test-chain", &expectedGenesis) + + response, err := keeper.QuerySubscriberGenesis(sdk.WrapSDKContext(ctx), &types.QuerySubscriberGenesisRequest{ + Chain: "test-chain", + }) + require.NoError(t, err) + assert.Equal(t, expectedGenesis, response.SubscriberGenesis) + }) + + t.Run("Non-existent subscriber genesis", func(t *testing.T) { + response, err := keeper.QuerySubscriberGenesis(sdk.WrapSDKContext(ctx), &types.QuerySubscriberGenesisRequest{ + Chain: "non-existent-chain", + }) + require.Error(t, err) + assert.Nil(t, response) + assert.Equal(t, codes.NotFound, status.Code(err)) + }) + + t.Run("Nil request", func(t *testing.T) { + response, err := keeper.QuerySubscriberGenesis(sdk.WrapSDKContext(ctx), nil) + require.Error(t, err) + assert.Nil(t, response) + assert.Equal(t, codes.InvalidArgument, status.Code(err)) + }) +} diff --git 
a/x/appchain/coordinator/keeper/height.go b/x/appchain/coordinator/keeper/height.go new file mode 100644 index 000000000..f54698aeb --- /dev/null +++ b/x/appchain/coordinator/keeper/height.go @@ -0,0 +1,44 @@ +package keeper + +import ( + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// MapHeightToChainVscID stores the height corresponding to a chainID and vscID +func (k Keeper) MapHeightToChainVscID(ctx sdk.Context, chainID string, vscID uint64, height uint64) { + store := ctx.KVStore(k.storeKey) + key := types.HeightToChainVscIDKey(chainID, vscID) + store.Set(key, sdk.Uint64ToBigEndian(height)) +} + +// GetHeightForChainVscID gets the height corresponding to a chainID and vscID +func (k Keeper) GetHeightForChainVscID(ctx sdk.Context, chainID string, vscID uint64) uint64 { + store := ctx.KVStore(k.storeKey) + key := types.HeightToChainVscIDKey(chainID, vscID) + // if store.Has(key) is false will return a height of 0 + return sdk.BigEndianToUint64(store.Get(key)) +} + +// SetVscIDForChain stores the vscID corresponding to a chainID +func (k Keeper) SetVscIDForChain(ctx sdk.Context, chainID string, vscID uint64) { + store := ctx.KVStore(k.storeKey) + key := types.VscIDForChainKey(chainID) + store.Set(key, sdk.Uint64ToBigEndian(vscID)) +} + +// GetVscIDForChain gets the vscID corresponding to a chainID +func (k Keeper) GetVscIDForChain(ctx sdk.Context, chainID string) uint64 { + store := ctx.KVStore(k.storeKey) + key := types.VscIDForChainKey(chainID) + return sdk.BigEndianToUint64(store.Get(key)) +} + +// IncrementVscIDForChain increments the vscID corresponding to a chainID, and +// stores/returns the new vscID +func (k Keeper) IncrementVscIDForChain(ctx sdk.Context, chainID string) uint64 { + vscID := k.GetVscIDForChain(ctx, chainID) + vscID++ + k.SetVscIDForChain(ctx, chainID, vscID) + return vscID +} diff --git a/x/appchain/coordinator/keeper/height_test.go b/x/appchain/coordinator/keeper/height_test.go new file mode 100644 index 000000000..101ef1932 --- /dev/null +++ b/x/appchain/coordinator/keeper/height_test.go @@ -0,0 +1,90 @@ +package keeper_test + +import ( + "testing" + + testutil "github.com/ExocoreNetwork/exocore/testutil/keeper" + "github.com/stretchr/testify/assert" +) + +func TestKeeper_MapHeightToChainVscID(t *testing.T) { + keeper, ctx, _ := testutil.NewCoordinatorKeeper(t) + + chainID := "test-chain" + vscID := uint64(1) + height := uint64(100) + + keeper.MapHeightToChainVscID(ctx, chainID, vscID, height) + + // Verify the mapping was stored correctly + storedHeight := keeper.GetHeightForChainVscID(ctx, chainID, vscID) + assert.Equal(t, height, storedHeight) +} + +func TestKeeper_GetHeightForChainVscID(t *testing.T) { + keeper, ctx, _ := testutil.NewCoordinatorKeeper(t) + + chainID := "test-chain" + vscID := uint64(1) + height := uint64(100) + + // Test when the mapping doesn't exist + nonExistentHeight := keeper.GetHeightForChainVscID(ctx, chainID, vscID) + assert.Equal(t, uint64(0), nonExistentHeight) + + // Set a mapping + keeper.MapHeightToChainVscID(ctx, chainID, vscID, height) + + // Test when the mapping exists + existingHeight := keeper.GetHeightForChainVscID(ctx, chainID, vscID) + assert.Equal(t, height, existingHeight) +} + +func TestKeeper_SetVscIDForChain(t *testing.T) { + keeper, ctx, _ := testutil.NewCoordinatorKeeper(t) + + chainID := "test-chain" + vscID := uint64(1) + + keeper.SetVscIDForChain(ctx, chainID, vscID) + + // Verify the vscID was stored correctly + storedVscID := 
keeper.GetVscIDForChain(ctx, chainID) + assert.Equal(t, vscID, storedVscID) +} + +func TestKeeper_GetVscIDForChain(t *testing.T) { + keeper, ctx, _ := testutil.NewCoordinatorKeeper(t) + + chainID := "test-chain" + vscID := uint64(1) + + // Test when the vscID doesn't exist + nonExistentVscID := keeper.GetVscIDForChain(ctx, chainID) + assert.Equal(t, uint64(0), nonExistentVscID) + + // Set a vscID + keeper.SetVscIDForChain(ctx, chainID, vscID) + + // Test when the vscID exists + existingVscID := keeper.GetVscIDForChain(ctx, chainID) + assert.Equal(t, vscID, existingVscID) +} + +func TestKeeper_IncrementVscIDForChain(t *testing.T) { + keeper, ctx, _ := testutil.NewCoordinatorKeeper(t) + + chainID := "test-chain" + + // Test initial increment (should start from 0) + newVscID := keeper.IncrementVscIDForChain(ctx, chainID) + assert.Equal(t, uint64(1), newVscID) + + // Test subsequent increment + newVscID = keeper.IncrementVscIDForChain(ctx, chainID) + assert.Equal(t, uint64(2), newVscID) + + // Verify the stored vscID + storedVscID := keeper.GetVscIDForChain(ctx, chainID) + assert.Equal(t, uint64(2), storedVscID) +} diff --git a/x/appchain/coordinator/keeper/ibc_client.go b/x/appchain/coordinator/keeper/ibc_client.go index f31df9243..05c919c94 100644 --- a/x/appchain/coordinator/keeper/ibc_client.go +++ b/x/appchain/coordinator/keeper/ibc_client.go @@ -7,7 +7,6 @@ import ( "github.com/ExocoreNetwork/exocore/utils" commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" - avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" abci "github.com/cometbft/cometbft/abci/types" tmtypes "github.com/cometbft/cometbft/types" sdk "github.com/cosmos/cosmos-sdk/types" @@ -17,6 +16,41 @@ import ( ibctmtypes "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint" ) +// ActivateScheduledChains activates the scheduled chains for the given epoch. +func (k Keeper) ActivateScheduledChains(ctx sdk.Context, epochIdentifier string, epochNumber int64) { + executable := k.GetPendingSubChains(ctx, epochIdentifier, uint64(epochNumber)) + for _, subscriber := range executable.List { + _, writeFn, err := k.CreateClientForSubscriberInCachedCtx(ctx, subscriber) + if err != nil { + k.Logger(ctx).Error( + "error creating client for subscriber", + "chainID", subscriber.ChainID, + "error", err, + ) + _, addr := k.avsKeeper.IsAVSByChainID(ctx, subscriber.ChainID) + if err := k.avsKeeper.DeleteAVSInfo(ctx, addr); err != nil { + // should never happen + k.Logger(ctx).Error( + "subscriber AVS not deleted", + "chainID", subscriber, + "error", err, + ) + } + continue + } + writeFn() + k.Logger(ctx).Info( + "subscriber chain started", + "chainID", subscriber, + // we start at the current block and do not allow scheduling. this is the same + // as any other AVS. + "spawn time", ctx.BlockTime().UTC(), + ) + } + // clear pending queue, including those that errored out. + k.ClearPendingSubChains(ctx, epochIdentifier, uint64(epochNumber)) +} + // CreateClientForSubscriberInCachedCtx is a wrapper function around CreateClientForSubscriber. func (k Keeper) CreateClientForSubscriberInCachedCtx( ctx sdk.Context, @@ -37,7 +71,7 @@ func (k Keeper) CreateClientForSubscriber( subscriberParams := req.SubscriberParams // we always deploy a new client for the subscriber chain for our module // technically, the below can never happen but it is guarded in ICS-20 and therefore, here. 
- if _, found := k.GetClientForChain(ctx, chainID); found { + if _, found := k.GetClientForChain(ctx, chainID); found { // IBC related // client already exists return types.ErrDuplicateSubChain.Wrapf("chainID: %s", chainID) } @@ -47,8 +81,8 @@ func (k Keeper) CreateClientForSubscriber( clientState.ChainId = chainID // TODO(mm): Make this configurable for switchover use case clientState.LatestHeight = clienttypes.Height{ - RevisionNumber: clienttypes.ParseChainID(chainID), - RevisionHeight: 1, + RevisionNumber: clienttypes.ParseChainID(chainID), // IBC related + RevisionHeight: 1, // TODO: different initial height not supported yet } subscriberUnbondingPeriod := subscriberParams.UnbondingPeriod trustPeriod, err := commontypes.CalculateTrustPeriod( @@ -71,9 +105,6 @@ func (k Keeper) CreateClientForSubscriber( } // this state can be pruned after the initial handshake occurs. k.SetSubscriberGenesis(ctx, chainID, subscriberGenesis) - k.SetSubSlashFractionDowntime(ctx, chainID, subscriberParams.SlashFractionDowntime) - k.SetSubSlashFractionDoubleSign(ctx, chainID, subscriberParams.SlashFractionDoubleSign) - k.SetSubDowntimeJailDuration(ctx, chainID, subscriberParams.DowntimeJailDuration) consensusState := ibctmtypes.NewConsensusState( ctx.BlockTime(), commitmenttypes.NewMerkleRoot([]byte(ibctmtypes.SentinelRoot)), @@ -92,7 +123,17 @@ func (k Keeper) CreateClientForSubscriber( // assume we start with a value of 2 and we are giving 4 full epochs for initialization. // so when epoch 6 ends, the timeout ends. initTimeoutPeriod.EpochNumber += uint64(epochInfo.CurrentEpoch) + 1 + // lookup from timeout to chainID k.AppendChainToInitTimeout(ctx, initTimeoutPeriod, chainID) + // reverse lookup from chainID to timeout + k.SetChainInitTimeout(ctx, chainID, initTimeoutPeriod) + + // if the chain doesn't initialize in time, the following items will be cleared. + // same for timeout or error. + k.SetSubSlashFractionDowntime(ctx, chainID, subscriberParams.SlashFractionDowntime) + k.SetSubSlashFractionDoubleSign(ctx, chainID, subscriberParams.SlashFractionDoubleSign) + k.SetSubDowntimeJailDuration(ctx, chainID, subscriberParams.DowntimeJailDuration) + k.SetMaxValidatorsForChain(ctx, chainID, req.MaxValidators) k.Logger(ctx).Info( "subscriber chain registered (client created)", @@ -131,7 +172,6 @@ func (k Keeper) MakeSubscriberGenesis( params := k.GetParams(ctx) chainID := req.ChainID k.Logger(ctx).Info("Creating genesis state for subscriber chain", "chainID", chainID) - chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(chainID) coordinatorUnbondingPeriod := k.stakingKeeper.UnbondingTime(ctx) // client state clientState := params.TemplateClient @@ -158,9 +198,9 @@ func (k Keeper) MakeSubscriberGenesis( err, chainID, ) } - operators, keys := k.operatorKeeper.GetActiveOperatorsForChainID(ctx, chainIDWithoutRevision) + operators, keys := k.operatorKeeper.GetActiveOperatorsForChainID(ctx, chainID) powers, err := k.operatorKeeper.GetVotePowerForChainID( - ctx, operators, chainIDWithoutRevision, + ctx, operators, chainID, ) if err != nil { // the `err` includes the `chainID` and hence no need to log it again @@ -188,10 +228,21 @@ func (k Keeper) MakeSubscriberGenesis( break } wrappedKey := keys[i] + validator, err := commontypes.NewSubscriberValidator( + wrappedKey.ToConsAddr(), power, wrappedKey.ToSdkKey(), + ) + if err != nil { + // cannot happen, but just in case add this check. + // simply skip the validator if it does. 
+ continue + } validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{ PubKey: *wrappedKey.ToTmProtoKey(), Power: power, }) + // at the time of genesis, all validators are stored since there is no + // existing validator set to take a diff with. + k.SetSubscriberValidatorForChain(ctx, chainID, validator) } if len(validatorUpdates) == 0 { return nil, nil, errorsmod.Wrapf( diff --git a/x/appchain/coordinator/keeper/ibc_client_test.go b/x/appchain/coordinator/keeper/ibc_client_test.go new file mode 100644 index 000000000..40edb3a90 --- /dev/null +++ b/x/appchain/coordinator/keeper/ibc_client_test.go @@ -0,0 +1,232 @@ +package keeper_test + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/suite" + + sdk "github.com/cosmos/cosmos-sdk/types" + + testkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper" + keytypes "github.com/ExocoreNetwork/exocore/types/keys" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/keeper" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + epochstypes "github.com/ExocoreNetwork/exocore/x/epochs/types" + + testutiltx "github.com/ExocoreNetwork/exocore/testutil/tx" + "go.uber.org/mock/gomock" + + commitmenttypes "github.com/cosmos/ibc-go/v7/modules/core/23-commitment/types" + ibctmtypes "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint" +) + +type IBCClientTestSuite struct { + suite.Suite + + ctx sdk.Context + keeper keeper.Keeper + mocks testkeeper.MockedKeepers +} + +func (suite *IBCClientTestSuite) SetupTest() { + suite.keeper, suite.ctx, suite.mocks = testkeeper.NewCoordinatorKeeper(suite.T()) +} + +func TestIBCClientTestSuite(t *testing.T) { + suite.Run(t, new(IBCClientTestSuite)) +} + +func (suite *IBCClientTestSuite) TestActivateScheduledChains() { + testCases := []struct { + name string + setupMocks func() + epochIdentifier string + epochNumber int64 + }{ + { + name: "Successful activation of one scheduled chain", + setupMocks: func() { + pendingChains := types.PendingSubscriberChainRequests{ + List: []types.RegisterSubscriberChainRequest{ + { + ChainID: "chain-1", + FromAddress: sdk.AccAddress(testutiltx.GenerateAddress().Bytes()).String(), + EpochIdentifier: "day", + AssetIDs: []string{"asset-1"}, + MinSelfDelegationUsd: 100, + MaxValidators: 100, + SubscriberParams: commontypes.SubscriberParams{ + UnbondingPeriod: time.Hour * 24 * 14, + }, + }, + }, + } + for _, chain := range pendingChains.List { + suite.keeper.AppendPendingSubChain(suite.ctx, "day", 1, &chain) + } + suite.mocks.StakingKeeper.EXPECT().UnbondingTime(gomock.Any()).Return(time.Hour * 24 * 14) + mockConsensusState := &ibctmtypes.ConsensusState{ + // Fill with appropriate test data + Timestamp: time.Now(), + Root: commitmenttypes.NewMerkleRoot([]byte("root")), + NextValidatorsHash: []byte("next_validators_hash"), + } + suite.mocks.ClientKeeper.EXPECT().GetSelfConsensusState(gomock.Any(), gomock.Any()).Return(mockConsensusState, nil) + suite.mocks.OperatorKeeper.EXPECT().GetActiveOperatorsForChainID(gomock.Any(), gomock.Any()).Return( + []sdk.AccAddress{ + sdk.AccAddress(testutiltx.GenerateAddress().Bytes()), + sdk.AccAddress(testutiltx.GenerateAddress().Bytes())}, + []keytypes.WrappedConsKey{ + testutiltx.GenerateConsensusKey(), + testutiltx.GenerateConsensusKey(), + }, + ) + suite.mocks.OperatorKeeper.EXPECT().GetVotePowerForChainID(gomock.Any(), gomock.Any(), gomock.Any()).Return([]int64{100, 200}, nil) + 
suite.mocks.ClientKeeper.EXPECT().CreateClient(gomock.Any(), gomock.Any(), gomock.Any()).Return("clientID", nil) + suite.mocks.EpochsKeeper.EXPECT().GetEpochInfo(gomock.Any(), gomock.Any()).Return(epochstypes.EpochInfo{CurrentEpoch: 1}, true) + }, + epochIdentifier: "day", + epochNumber: 1, + }, + { + name: "Activation of multiple scheduled chains", + setupMocks: func() { + pendingChains := types.PendingSubscriberChainRequests{ + List: []types.RegisterSubscriberChainRequest{ + { + ChainID: "chain-1", + FromAddress: sdk.AccAddress(testutiltx.GenerateAddress().Bytes()).String(), + EpochIdentifier: "week", + AssetIDs: []string{"asset-1"}, + MinSelfDelegationUsd: 100, + MaxValidators: 100, + SubscriberParams: commontypes.SubscriberParams{ + UnbondingPeriod: time.Hour * 24 * 14, + }, + }, + { + ChainID: "chain-2", + FromAddress: sdk.AccAddress(testutiltx.GenerateAddress().Bytes()).String(), + EpochIdentifier: "week", + AssetIDs: []string{"asset-2"}, + MinSelfDelegationUsd: 200, + MaxValidators: 50, + SubscriberParams: commontypes.SubscriberParams{ + UnbondingPeriod: time.Hour * 24 * 21, + }, + }, + }, + } + for _, chain := range pendingChains.List { + suite.keeper.AppendPendingSubChain(suite.ctx, "week", 1, &chain) + } + suite.mocks.StakingKeeper.EXPECT().UnbondingTime(gomock.Any()).Return(time.Hour * 24 * 14).Times(2) + mockConsensusState := &ibctmtypes.ConsensusState{ + Timestamp: time.Now(), + Root: commitmenttypes.NewMerkleRoot([]byte("root")), + NextValidatorsHash: []byte("next_validators_hash"), + } + suite.mocks.ClientKeeper.EXPECT().GetSelfConsensusState(gomock.Any(), gomock.Any()).Return(mockConsensusState, nil).Times(2) + suite.mocks.OperatorKeeper.EXPECT().GetActiveOperatorsForChainID(gomock.Any(), gomock.Any()).Return( + []sdk.AccAddress{ + sdk.AccAddress(testutiltx.GenerateAddress().Bytes()), + sdk.AccAddress(testutiltx.GenerateAddress().Bytes())}, + []keytypes.WrappedConsKey{ + testutiltx.GenerateConsensusKey(), + testutiltx.GenerateConsensusKey(), + }, + ).Times(2) + suite.mocks.OperatorKeeper.EXPECT().GetVotePowerForChainID(gomock.Any(), gomock.Any(), gomock.Any()).Return([]int64{100, 200}, nil).Times(2) + suite.mocks.ClientKeeper.EXPECT().CreateClient(gomock.Any(), gomock.Any(), gomock.Any()).Return("clientID1", nil) + suite.mocks.ClientKeeper.EXPECT().CreateClient(gomock.Any(), gomock.Any(), gomock.Any()).Return("clientID2", nil) + suite.mocks.EpochsKeeper.EXPECT().GetEpochInfo(gomock.Any(), gomock.Any()).Return(epochstypes.EpochInfo{CurrentEpoch: 1}, true).Times(2) + }, + epochIdentifier: "week", + epochNumber: 1, + }, + { + name: "Activation with one chain failing", + setupMocks: func() { + pendingChains := types.PendingSubscriberChainRequests{ + List: []types.RegisterSubscriberChainRequest{ + { + ChainID: "chain-1", + FromAddress: sdk.AccAddress(testutiltx.GenerateAddress().Bytes()).String(), + EpochIdentifier: "month", + AssetIDs: []string{"asset-1"}, + MinSelfDelegationUsd: 100, + MaxValidators: 100, + SubscriberParams: commontypes.SubscriberParams{ + UnbondingPeriod: time.Hour * 24 * 14, + }, + }, + { + ChainID: "chain-2", + FromAddress: sdk.AccAddress(testutiltx.GenerateAddress().Bytes()).String(), + EpochIdentifier: "month", + AssetIDs: []string{"asset-2"}, + MinSelfDelegationUsd: 200, + MaxValidators: 50, + SubscriberParams: commontypes.SubscriberParams{ + UnbondingPeriod: time.Hour * 24 * 21, + }, + }, + }, + } + for _, chain := range pendingChains.List { + suite.keeper.AppendPendingSubChain(suite.ctx, "month", 1, &chain) + } + 
suite.mocks.StakingKeeper.EXPECT().UnbondingTime(gomock.Any()).Return(time.Hour * 24 * 14).Times(2) + mockConsensusState := &ibctmtypes.ConsensusState{ + Timestamp: time.Now(), + Root: commitmenttypes.NewMerkleRoot([]byte("root")), + NextValidatorsHash: []byte("next_validators_hash"), + } + suite.mocks.ClientKeeper.EXPECT().GetSelfConsensusState(gomock.Any(), gomock.Any()).Return(mockConsensusState, nil).Times(2) + suite.mocks.OperatorKeeper.EXPECT().GetActiveOperatorsForChainID(gomock.Any(), gomock.Any()).Return( + []sdk.AccAddress{ + sdk.AccAddress(testutiltx.GenerateAddress().Bytes()), + sdk.AccAddress(testutiltx.GenerateAddress().Bytes())}, + []keytypes.WrappedConsKey{ + testutiltx.GenerateConsensusKey(), + testutiltx.GenerateConsensusKey(), + }, + ).Times(2) + suite.mocks.OperatorKeeper.EXPECT().GetVotePowerForChainID(gomock.Any(), gomock.Any(), gomock.Any()).Return([]int64{100, 200}, nil) + suite.mocks.OperatorKeeper.EXPECT().GetVotePowerForChainID(gomock.Any(), gomock.Any(), gomock.Any()).Return([]int64{}, fmt.Errorf("error getting vote power")) + suite.mocks.ClientKeeper.EXPECT().CreateClient(gomock.Any(), gomock.Any(), gomock.Any()).Return("clientID1", nil) + suite.mocks.EpochsKeeper.EXPECT().GetEpochInfo(gomock.Any(), gomock.Any()).Return(epochstypes.EpochInfo{CurrentEpoch: 1}, true) + suite.mocks.AVSKeeper.EXPECT().IsAVSByChainID(gomock.Any(), gomock.Any()).Return(true, testutiltx.GenerateAddress().String()) + suite.mocks.AVSKeeper.EXPECT().DeleteAVSInfo(gomock.Any(), gomock.Any()).Return(nil) + }, + epochIdentifier: "month", + epochNumber: 1, + }, + { + name: "No chains to activate", + setupMocks: func() { + // No pending chains, so no mocks needed + }, + epochIdentifier: "year", + epochNumber: 1, + }, + } + + for _, tc := range testCases { + suite.Run(tc.name, func() { + suite.SetupTest() // Reset state for each test case + if tc.setupMocks != nil { + tc.setupMocks() + } + + suite.keeper.ActivateScheduledChains(suite.ctx, tc.epochIdentifier, tc.epochNumber) + + // Verify pending chains are cleared + pendingChains := suite.keeper.GetPendingSubChains(suite.ctx, tc.epochIdentifier, uint64(tc.epochNumber)) + suite.Empty(pendingChains.List, "Pending chains should be cleared") + }) + } +} diff --git a/x/appchain/coordinator/keeper/identifiers.go b/x/appchain/coordinator/keeper/identifiers.go index 63b8b3353..dbc511162 100644 --- a/x/appchain/coordinator/keeper/identifiers.go +++ b/x/appchain/coordinator/keeper/identifiers.go @@ -30,3 +30,66 @@ func (k Keeper) DeleteClientForChain(ctx sdk.Context, chainID string) { store := ctx.KVStore(k.storeKey) store.Delete(types.ClientForChainKey(chainID)) } + +// GetAllChainsWithClients gets all chain ids that have an ibc client id. +func (k Keeper) GetAllChainsWithClients(ctx sdk.Context) []string { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte{types.ClientForChainBytePrefix}) + defer iterator.Close() + + var chains []string + for ; iterator.Valid(); iterator.Next() { + chainID := string(iterator.Key()[1:]) + chains = append(chains, chainID) + } + + return chains +} + +// SetChannelForChain sets the ibc channel id for a given chain id. +func (k Keeper) SetChannelForChain(ctx sdk.Context, chainID string, channelID string) { + store := ctx.KVStore(k.storeKey) + store.Set(types.ChannelForChainKey(chainID), []byte(channelID)) +} + +// GetChannelForChain gets the ibc channel id for a given chain id. 
+func (k Keeper) GetChannelForChain(ctx sdk.Context, chainID string) (string, bool) { + store := ctx.KVStore(k.storeKey) + bytes := store.Get(types.ChannelForChainKey(chainID)) + if bytes == nil { + return "", false + } + return string(bytes), true +} + +// GetAllChainsWithChannels gets all chain ids that have an ibc channel id, on top of the +// client id. +func (k Keeper) GetAllChainsWithChannels(ctx sdk.Context) []string { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte{types.ChannelForChainBytePrefix}) + defer iterator.Close() + + var chains []string + for ; iterator.Valid(); iterator.Next() { + chainID := string(iterator.Key()[1:]) + chains = append(chains, chainID) + } + + return chains +} + +// SetChainForChannel sets the chain id for a given channel id. +func (k Keeper) SetChainForChannel(ctx sdk.Context, channelID string, chainID string) { + store := ctx.KVStore(k.storeKey) + store.Set(types.ChainForChannelKey(channelID), []byte(chainID)) +} + +// GetChainForChannel gets the chain id for a given channel id. +func (k Keeper) GetChainForChannel(ctx sdk.Context, channelID string) (string, bool) { + store := ctx.KVStore(k.storeKey) + bytes := store.Get(types.ChainForChannelKey(channelID)) + if bytes == nil { + return "", false + } + return string(bytes), true +} diff --git a/x/appchain/coordinator/keeper/identifiers_test.go b/x/appchain/coordinator/keeper/identifiers_test.go new file mode 100644 index 000000000..06a371e13 --- /dev/null +++ b/x/appchain/coordinator/keeper/identifiers_test.go @@ -0,0 +1,96 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + testkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper" +) + +func TestSetGetClientForChain(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + clientID := "test-client" + + // Test setting and getting + keeper.SetClientForChain(ctx, chainID, clientID) + gotClientID, found := keeper.GetClientForChain(ctx, chainID) + + require.True(t, found) + require.Equal(t, clientID, gotClientID) + + // Test getting non-existent client + _, found = keeper.GetClientForChain(ctx, "non-existent-chain") + require.False(t, found) +} + +func TestDeleteClientForChain(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + clientID := "test-client" + + keeper.SetClientForChain(ctx, chainID, clientID) + keeper.DeleteClientForChain(ctx, chainID) + + _, found := keeper.GetClientForChain(ctx, chainID) + require.False(t, found) +} + +func TestGetAllChainsWithClients(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainIDs := []string{"chain1", "chain2", "chain3"} + for _, chainID := range chainIDs { + keeper.SetClientForChain(ctx, chainID, "client-"+chainID) + } + + gotChainIDs := keeper.GetAllChainsWithClients(ctx) + require.ElementsMatch(t, chainIDs, gotChainIDs) +} + +func TestSetGetChannelForChain(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + channelID := "test-channel" + + keeper.SetChannelForChain(ctx, chainID, channelID) + gotChannelID, found := keeper.GetChannelForChain(ctx, chainID) + + require.True(t, found) + require.Equal(t, channelID, gotChannelID) + + _, found = keeper.GetChannelForChain(ctx, "non-existent-chain") + require.False(t, found) +} + +func TestGetAllChainsWithChannels(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainIDs := []string{"chain1", 
"chain2", "chain3"} + for _, chainID := range chainIDs { + keeper.SetChannelForChain(ctx, chainID, "channel-"+chainID) + } + + gotChainIDs := keeper.GetAllChainsWithChannels(ctx) + require.ElementsMatch(t, chainIDs, gotChainIDs) +} + +func TestSetGetChainForChannel(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + channelID := "test-channel" + chainID := "test-chain" + + keeper.SetChainForChannel(ctx, channelID, chainID) + gotChainID, found := keeper.GetChainForChannel(ctx, channelID) + + require.True(t, found) + require.Equal(t, chainID, gotChainID) + + _, found = keeper.GetChainForChannel(ctx, "non-existent-channel") + require.False(t, found) +} diff --git a/x/appchain/coordinator/keeper/impl_delegation_hooks.go b/x/appchain/coordinator/keeper/impl_delegation_hooks.go new file mode 100644 index 000000000..d52896904 --- /dev/null +++ b/x/appchain/coordinator/keeper/impl_delegation_hooks.go @@ -0,0 +1,100 @@ +package keeper + +import ( + "fmt" + + "github.com/ExocoreNetwork/exocore/utils" + delegationtypes "github.com/ExocoreNetwork/exocore/x/delegation/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// DelegationHooksWrapper is the wrapper structure that implements the delegation hooks for the +// dogfood keeper. +type DelegationHooksWrapper struct { + keeper *Keeper +} + +// Interface guard +var _ delegationtypes.DelegationHooks = DelegationHooksWrapper{} + +// DelegationHooks returns the delegation hooks wrapper. It follows the "accept interfaces, +// return concretes" pattern. +func (k *Keeper) DelegationHooks() DelegationHooksWrapper { + return DelegationHooksWrapper{k} +} + +// AfterDelegation is called after a delegation is made. +func (wrapper DelegationHooksWrapper) AfterDelegation( + sdk.Context, sdk.AccAddress, +) { + // we do nothing here, since the vote power for all operators is calculated + // in the end separately. even if we knew the amount of the delegation, the + // exchange rate at the end of the epoch is unknown. +} + +// AfterUndelegationStarted is called after an undelegation is started. +func (wrapper DelegationHooksWrapper) AfterUndelegationStarted( + ctx sdk.Context, operator sdk.AccAddress, recordKey []byte, +) error { + logger := wrapper.keeper.Logger(ctx) + // given the operator, find the chainIDs for which they are (1) opted in, or (2) in the process of opting out. + // (1) simply let the undelegation mature when it matures on the subscriber chain. + // (2) the undelegation should mature when the operator's opt out matures on the subscriber chain. + // within the undelegation situation, the previous keys don't matter, because + // they will be replaced anyway. hence, we only need to check the current keys. + chainIDs, err := wrapper.keeper.operatorKeeper.GetChainIDsForOperator(ctx, operator.String()) + if err != nil { + logger.Error( + "error getting chainIDs for operator", + "operator", operator, + "recordKey", fmt.Sprintf("%x", recordKey), + ) + // do not return an error because that would indicate an undelegation failure. + // TODO: verify the above claim and check the impact of actually returning the error + return nil + } + // TODO: above only returns the chainIDs for which the operator is opted-in, but + // not those for which the operator is in the process of opting out. this will be + // resolved in the unbonding duration calculation pull request and hopefully, + // meaningfully unified. 
+ for _, chainID := range chainIDs { + if chainID != utils.ChainIDWithoutRevision(ctx.ChainID()) { + found, wrappedKey, _ := wrapper.keeper.operatorKeeper.GetOperatorConsKeyForChainID( + ctx, operator, chainID, + ) + if !found { + logger.Debug( + "operator not opted in; ignoring", + "operator", operator, + "chainID", chainID, + ) + continue + } + var nextVscID uint64 + if wrapper.keeper.operatorKeeper.IsOperatorRemovingKeyFromChainID( + ctx, operator, chainID, + ) { + nextVscID = wrapper.keeper.GetMaturityVscIDForChainIDConsAddr(ctx, chainID, wrappedKey.ToConsAddr()) + if nextVscID == 0 { + logger.Error( + "undelegation maturity epoch not set", + "operator", operator, + "chainID", chainID, + "consAddr", wrappedKey.ToConsAddr(), + "recordKey", fmt.Sprintf("%x", recordKey), + ) + // move on to the next chainID + continue + } + } else { + nextVscID = wrapper.keeper.GetVscIDForChain(ctx, chainID) + 1 + } + wrapper.keeper.AppendUndelegationToRelease(ctx, chainID, nextVscID, recordKey) + // increment the count for each such chainID + if err := wrapper.keeper.delegationKeeper.IncrementUndelegationHoldCount(ctx, recordKey); err != nil { + return err + } + } + } + return nil +} diff --git a/x/appchain/coordinator/keeper/impl_delegation_hooks_test.go b/x/appchain/coordinator/keeper/impl_delegation_hooks_test.go new file mode 100644 index 000000000..5012ade4d --- /dev/null +++ b/x/appchain/coordinator/keeper/impl_delegation_hooks_test.go @@ -0,0 +1,98 @@ +package keeper_test + +import ( + "errors" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + + testkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper" + testutiltx "github.com/ExocoreNetwork/exocore/testutil/tx" + keepermod "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/keeper" +) + +func TestAfterDelegation(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + hooks := keeper.DelegationHooks() + + // AfterDelegation should do nothing, so we just ensure it doesn't panic + require.NotPanics(t, func() { + hooks.AfterDelegation(ctx, sdk.AccAddress{}) + }) +} + +func TestAfterUndelegationStarted(t *testing.T) { + keeper, ctx, mocks := testkeeper.NewCoordinatorKeeper(t) + hooks := keeper.DelegationHooks() + + operator := sdk.AccAddress("testoperator") + recordKey := []byte("testrecordkey") + chainIDs := []string{"chain1", "chain2"} + + // Setup mocks + mocks.OperatorKeeper.EXPECT().GetChainIDsForOperator(ctx, operator.String()).Return(chainIDs, nil) + + for _, chainID := range chainIDs { + wrappedKey := testutiltx.GenerateConsensusKey() + mocks.OperatorKeeper.EXPECT().GetOperatorConsKeyForChainID(ctx, operator, chainID). + Return(true, wrappedKey, nil) + mocks.OperatorKeeper.EXPECT().IsOperatorRemovingKeyFromChainID(ctx, operator, chainID). + Return(false) + mocks.DelegationKeeper.EXPECT().IncrementUndelegationHoldCount(ctx, recordKey).Return(nil) + } + + // Test AfterUndelegationStarted + err := hooks.AfterUndelegationStarted(ctx, operator, recordKey) + require.NoError(t, err) + + // Additional test case: operator removing key from chainID + t.Run("Operator removing key", func(t *testing.T) { + keeper, ctx, mocks := testkeeper.NewCoordinatorKeeper(t) + hooks := keeper.DelegationHooks() + + chainID := "chain3" + mocks.OperatorKeeper.EXPECT().GetChainIDsForOperator(ctx, operator.String()).Return([]string{chainID}, nil) + wrappedKey := testutiltx.GenerateConsensusKey() + mocks.OperatorKeeper.EXPECT().GetOperatorConsKeyForChainID(ctx, operator, chainID). 
+ Return(true, wrappedKey, nil) + mocks.OperatorKeeper.EXPECT().IsOperatorRemovingKeyFromChainID(ctx, operator, chainID). + Return(true) + + nextVscID := uint64(5) + keeper.SetMaturityVscIDForChainIDConsAddr(ctx, chainID, wrappedKey.ToConsAddr(), nextVscID) + + mocks.DelegationKeeper.EXPECT().IncrementUndelegationHoldCount(ctx, recordKey).Return(nil) + + err := hooks.AfterUndelegationStarted(ctx, operator, recordKey) + require.NoError(t, err) + }) + + // Test case: Error incrementing undelegation hold count + t.Run("Error incrementing hold count", func(t *testing.T) { + expErr := errors.New("error incrementing undelegation hold count") + keeper, ctx, mocks := testkeeper.NewCoordinatorKeeper(t) + hooks := keeper.DelegationHooks() + + mocks.OperatorKeeper.EXPECT().GetChainIDsForOperator(ctx, operator.String()).Return(chainIDs, nil) + wrappedKey := testutiltx.GenerateConsensusKey() + mocks.OperatorKeeper.EXPECT().GetOperatorConsKeyForChainID(ctx, operator, chainIDs[0]). + Return(true, wrappedKey, nil) + mocks.OperatorKeeper.EXPECT().IsOperatorRemovingKeyFromChainID(ctx, operator, chainIDs[0]). + Return(false) + mocks.DelegationKeeper.EXPECT().IncrementUndelegationHoldCount(ctx, recordKey). + Return(expErr) + + err := hooks.AfterUndelegationStarted(ctx, operator, recordKey) + require.Error(t, err) + require.ErrorIs(t, err, expErr) + }) +} + +func TestDelegationHooks(t *testing.T) { + keeper, _, _ := testkeeper.NewCoordinatorKeeper(t) + hooks := keeper.DelegationHooks() + + require.NotNil(t, hooks) + require.IsType(t, keepermod.DelegationHooksWrapper{}, hooks) +} diff --git a/x/appchain/coordinator/keeper/impl_epochs_hooks.go b/x/appchain/coordinator/keeper/impl_epochs_hooks.go index 2643ea11b..3e7bfcc90 100644 --- a/x/appchain/coordinator/keeper/impl_epochs_hooks.go +++ b/x/appchain/coordinator/keeper/impl_epochs_hooks.go @@ -1,7 +1,6 @@ package keeper import ( - avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" epochstypes "github.com/ExocoreNetwork/exocore/x/epochs/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -25,50 +24,28 @@ func (k *Keeper) EpochsHooks() EpochsHooksWrapper { func (wrapper EpochsHooksWrapper) AfterEpochEnd( ctx sdk.Context, identifier string, epoch int64, ) { - // whenever an epoch ends, we should iterate over the list of pending subscriber chains - // to be activated, and then activate them. once activated, we should move them from - // the pending list to the active list. - executable := wrapper.keeper.GetPendingSubChains(ctx, identifier, uint64(epoch)) - for _, subscriber := range executable.List { - cctx, writeFn, err := wrapper.keeper.CreateClientForSubscriberInCachedCtx(ctx, subscriber) - if err != nil { - // within this block, we use the ctx and not the cctx, since the cctx's job is solely - // to guard the client creation. - // no re-attempts will be made for this subscriber - ctx.Logger().Error( - "subscriber client not created", - "chainID", subscriber.ChainID, - "error", err, - ) - // clear the registered AVS. remember that this module stores - // the chainID with the revision but the AVS module stores it without. 
- chainID := avstypes.ChainIDWithoutRevision(subscriber.ChainID) - // always guaranteed to exist - _, addr := wrapper.keeper.avsKeeper.IsAVSByChainID(ctx, chainID) - if err := wrapper.keeper.avsKeeper.DeleteAVSInfo(ctx, addr); err != nil { - // should never happen - ctx.Logger().Error( - "subscriber AVS not deleted", - "chainID", subscriber, - "error", err, - ) - } - continue - } - // copy over the events from the cached ctx - ctx.EventManager().EmitEvents(cctx.EventManager().Events()) - writeFn() - wrapper.keeper.Logger(ctx).Info( - "subscriber chain started", - "chainID", subscriber, - // we start at the current block and do not allow scheduling. this is the same - // as any other AVS. - "spawn time", ctx.BlockTime().UTC(), - ) - } - // delete those that were executed (including those that failed) - wrapper.keeper.ClearPendingSubChains(ctx, identifier, uint64(epoch)) - // next, we iterate over the active list and queue the validator set update for them. + // start any chains that are due to start, by creating their genesis state. + wrapper.keeper.ActivateScheduledChains(ctx, identifier, epoch) + + // slashing is applied during the epoch, so we don't have to do anything about that here. + // note that slashing should flow through to this keeper via a hook and the impact + // should be applied to the validator set. first, it should freeze the oracle round, + // then, it should calculate the USD power, then, it should find the new x/dogfood + // validator set and lastly, it should find the new appchain validator set for the + // impacted chains. + + // next, we remove any chains that didn't respond in time: either to the validator + // set update or to the initialization protocol. the removal is undertaken before + // generating the validator set update to save resources. + wrapper.keeper.RemoveTimedoutSubscribers(ctx, identifier, epoch) + + // last, we iterate over the active list and queue the validator set update for them. + // interchain-security does this in EndBlock, but we can do it now because our validator + // set is independent of the coordinator chain's. + wrapper.keeper.QueueValidatorUpdatesForEpochID(ctx, identifier, epoch) + // send the queued validator updates. the `epoch` is used for scheduling the VSC timeouts + // and nothing else. it has no bearing on the actual validator set. + wrapper.keeper.SendQueuedValidatorUpdates(ctx, epoch) } // BeforeEpochStart is called before an epoch starts. 
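For context on what `SendQueuedValidatorUpdates` ultimately puts on the wire, the payload is the `ValidatorSetChangePacketData` message whose generated marshaling code appears earlier in this diff. The following is a minimal sketch of constructing and encoding one such packet; it assumes the generated wire types live in `x/appchain/common/types` (consistent with how the coordinator keeper imports the shared appchain types elsewhere in this diff), and it deliberately omits the real send path (channel lookup, sequence numbers, timeouts):

```go
package main

import (
	"fmt"

	abci "github.com/cometbft/cometbft/abci/types"

	// Assumed location of the generated wire types; not confirmed by this hunk.
	commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types"
)

func main() {
	// Build a packet carrying the power diff for one subscriber chain. In the keeper,
	// ValidatorUpdates comes from the wrapped consensus keys and per-chain vote powers,
	// and ValsetUpdateID from IncrementVscIDForChain.
	packet := commontypes.ValidatorSetChangePacketData{
		ValidatorUpdates: []abci.ValidatorUpdate{
			{Power: 100}, // PubKey omitted here for brevity
		},
		ValsetUpdateID: 1,
		SlashAcks:      [][]byte{}, // acks for slash packets handled since the last update
	}

	// The IBC packet data is the protobuf encoding produced by the generated Marshal method.
	bz, err := packet.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Printf("validator set change packet: %d bytes\n", len(bz))
}
```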
diff --git a/x/appchain/coordinator/keeper/impl_epochs_hooks_test.go b/x/appchain/coordinator/keeper/impl_epochs_hooks_test.go new file mode 100644 index 000000000..2be82ac3e --- /dev/null +++ b/x/appchain/coordinator/keeper/impl_epochs_hooks_test.go @@ -0,0 +1,66 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + testkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper" + keepermod "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/keeper" +) + +func TestEpochsHooks(t *testing.T) { + keeper, _, _ := testkeeper.NewCoordinatorKeeper(t) + hooks := keeper.EpochsHooks() + + require.NotNil(t, hooks) + require.IsType(t, keepermod.EpochsHooksWrapper{}, hooks) +} + +func TestBeforeEpochStart(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + hooks := keeper.EpochsHooks() + + // BeforeEpochStart should do nothing, so we just ensure it doesn't panic + require.NotPanics(t, func() { + hooks.BeforeEpochStart(ctx, "test-epoch", 1) + }) +} + +func TestAfterEpochEnd(t *testing.T) { + keeper, ctx, mocks := testkeeper.NewCoordinatorKeeper(t) + hooks := keeper.EpochsHooks() + + identifier := "test-epoch" + epoch := int64(1) + + // Setup expectations for mocked methods + mocks.AVSKeeper.EXPECT().GetEpochEndChainIDs(gomock.Any(), gomock.Any(), gomock.Any()) + mocks.OperatorKeeper.EXPECT().GetChainIDsForOperator(gomock.Any(), gomock.Any()).AnyTimes() + mocks.OperatorKeeper.EXPECT().GetOperatorConsKeyForChainID(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + mocks.OperatorKeeper.EXPECT().IsOperatorRemovingKeyFromChainID(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + mocks.DelegationKeeper.EXPECT().IncrementUndelegationHoldCount(gomock.Any(), gomock.Any()).AnyTimes() + + // Expect ActivateScheduledChains to be called + mocks.AVSKeeper.EXPECT().IsAVSByChainID(gomock.Any(), gomock.Any()).AnyTimes() + mocks.AVSKeeper.EXPECT().DeleteAVSInfo(gomock.Any(), gomock.Any()).AnyTimes() + + // Expect RemoveTimedoutSubscribers to be called + mocks.ClientKeeper.EXPECT().GetClientState(gomock.Any(), gomock.Any()).AnyTimes() + // mocks.ClientKeeper.EXPECT().GetLatestHeight(gomock.Any(), gomock.Any()).AnyTimes() + + // Expect QueueValidatorUpdatesForEpochID to be called + mocks.OperatorKeeper.EXPECT().GetActiveOperatorsForChainID(gomock.Any(), gomock.Any()).AnyTimes() + mocks.OperatorKeeper.EXPECT().GetVotePowerForChainID(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + // Expect SendQueuedValidatorUpdates to be called + mocks.ChannelKeeper.EXPECT().GetNextSequenceSend(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + mocks.ChannelKeeper.EXPECT().SendPacket(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes() + + // Call AfterEpochEnd + require.NotPanics(t, func() { + hooks.AfterEpochEnd(ctx, identifier, epoch) + }) + +} diff --git a/x/appchain/coordinator/keeper/impl_operator_hooks.go b/x/appchain/coordinator/keeper/impl_operator_hooks.go new file mode 100644 index 000000000..3241c8907 --- /dev/null +++ b/x/appchain/coordinator/keeper/impl_operator_hooks.go @@ -0,0 +1,78 @@ +package keeper + +import ( + exocoretypes "github.com/ExocoreNetwork/exocore/types/keys" + "github.com/ExocoreNetwork/exocore/utils" + operatortypes "github.com/ExocoreNetwork/exocore/x/operator/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// OperatorHooksWrapper is the wrapper structure that implements the operator hooks for the +// coordinator keeper. 
+type OperatorHooksWrapper struct { + keeper *Keeper +} + +// Interface guards +var _ operatortypes.OperatorHooks = OperatorHooksWrapper{} + +func (k *Keeper) OperatorHooks() OperatorHooksWrapper { + return OperatorHooksWrapper{k} +} + +// AfterOperatorKeySet is the implementation of the operator hooks. +func (h OperatorHooksWrapper) AfterOperatorKeySet( + sdk.Context, sdk.AccAddress, string, exocoretypes.WrappedConsKey, +) { + // no-op +} + +// AfterOperatorKeyReplaced is the implementation of the operator hooks. +func (h OperatorHooksWrapper) AfterOperatorKeyReplaced( + ctx sdk.Context, _ sdk.AccAddress, + oldKey exocoretypes.WrappedConsKey, _ exocoretypes.WrappedConsKey, + chainID string, +) { + if chainID != utils.ChainIDWithoutRevision(ctx.ChainID()) { + consAddr := oldKey.ToConsAddr() + _, found := h.keeper.GetSubscriberValidatorForChain(ctx, chainID, consAddr) + if found { + // schedule this consensus address for pruning at the maturity of the packet containing this vscID that will + // go out at the end of this epoch. + nextVscID := h.keeper.GetVscIDForChain(ctx, chainID) + 1 + h.keeper.AppendConsAddrToPrune(ctx, chainID, nextVscID, consAddr) + // reverse lookup + h.keeper.SetMaturityVscIDForChainIDConsAddr(ctx, chainID, consAddr, nextVscID) + } else { + // delete the reverse lookup of old cons addr + chain id -> operator addr, since it was never an active + // validator. + h.keeper.operatorKeeper.DeleteOperatorAddressForChainIDAndConsAddr( + ctx, chainID, consAddr, + ) + } + } +} + +// AfterOperatorKeyRemovalInitiated is the implementation of the operator hooks. +func (h OperatorHooksWrapper) AfterOperatorKeyRemovalInitiated( + ctx sdk.Context, _ sdk.AccAddress, chainID string, key exocoretypes.WrappedConsKey, +) { + if chainID != utils.ChainIDWithoutRevision(ctx.ChainID()) { + consAddr := key.ToConsAddr() + _, found := h.keeper.GetSubscriberValidatorForChain(ctx, chainID, consAddr) + if found { + // schedule this consensus address for pruning at the maturity of the packet containing this vscID that will + // go out at the end of this epoch. + nextVscID := h.keeper.GetVscIDForChain(ctx, chainID) + 1 + h.keeper.AppendConsAddrToPrune(ctx, chainID, nextVscID, consAddr) + // reverse lookup + h.keeper.SetMaturityVscIDForChainIDConsAddr(ctx, chainID, consAddr, nextVscID) + } else { + // delete the reverse lookup of old cons addr + chain id -> operator addr, since it was never an active + // validator. 
+ h.keeper.operatorKeeper.DeleteOperatorAddressForChainIDAndConsAddr( + ctx, chainID, consAddr, + ) + } + } +} diff --git a/x/appchain/coordinator/keeper/impl_operator_hooks_test.go b/x/appchain/coordinator/keeper/impl_operator_hooks_test.go new file mode 100644 index 000000000..27d8df9fc --- /dev/null +++ b/x/appchain/coordinator/keeper/impl_operator_hooks_test.go @@ -0,0 +1,123 @@ +package keeper_test + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + testkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper" + testutiltx "github.com/ExocoreNetwork/exocore/testutil/tx" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + keepermod "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/keeper" +) + +func TestOperatorHooks(t *testing.T) { + keeper, _, _ := testkeeper.NewCoordinatorKeeper(t) + hooks := keeper.OperatorHooks() + + require.NotNil(t, hooks) + require.IsType(t, keepermod.OperatorHooksWrapper{}, hooks) +} + +func TestAfterOperatorKeySet(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + hooks := keeper.OperatorHooks() + + // AfterOperatorKeySet should do nothing, so we just ensure it doesn't panic + require.NotPanics(t, func() { + hooks.AfterOperatorKeySet(ctx, sdk.AccAddress{}, "test-chain", testutiltx.GenerateConsensusKey()) + }) +} + +func TestAfterOperatorKeyReplaced(t *testing.T) { + keeper, ctx, mocks := testkeeper.NewCoordinatorKeeper(t) + hooks := keeper.OperatorHooks() + + operator := sdk.AccAddress("testoperator") + chainID := "test-chain" + oldKey := testutiltx.GenerateConsensusKey() + newKey := testutiltx.GenerateConsensusKey() + + t.Run("Existing validator", func(t *testing.T) { + // Setup: Add a validator for the chain + validator, err := commontypes.NewSubscriberValidator(oldKey.ToConsAddr(), 100, oldKey.ToSdkKey()) + require.NoError(t, err) + keeper.SetSubscriberValidatorForChain(ctx, chainID, validator) + + // Set expectations + mocks.OperatorKeeper.EXPECT().DeleteOperatorAddressForChainIDAndConsAddr(gomock.Any(), chainID, oldKey.ToConsAddr()).Times(0) + + // Call the hook + hooks.AfterOperatorKeyReplaced(ctx, operator, oldKey, newKey, chainID) + + // Verify + nextVscID := keeper.GetVscIDForChain(ctx, chainID) + 1 + consAddrsToPrune := keeper.GetConsAddrsToPrune(ctx, chainID, nextVscID) + require.Contains(t, consAddrsToPrune.List, oldKey.ToConsAddr().Bytes()) + + maturityVscID := keeper.GetMaturityVscIDForChainIDConsAddr(ctx, chainID, oldKey.ToConsAddr()) + require.Equal(t, nextVscID, maturityVscID) + }) + + t.Run("Non-existing validator", func(t *testing.T) { + nonExistentKey := testutiltx.GenerateConsensusKey() + + // Set expectations + mocks.OperatorKeeper.EXPECT().DeleteOperatorAddressForChainIDAndConsAddr(gomock.Any(), chainID, nonExistentKey.ToConsAddr()).Times(1) + + // Call the hook + hooks.AfterOperatorKeyReplaced(ctx, operator, nonExistentKey, newKey, chainID) + + // Verify + nextVscID := keeper.GetVscIDForChain(ctx, chainID) + 1 + consAddrsToPrune := keeper.GetConsAddrsToPrune(ctx, chainID, nextVscID) + require.NotContains(t, consAddrsToPrune.List, nonExistentKey.ToConsAddr()) + }) +} + +func TestAfterOperatorKeyRemovalInitiated(t *testing.T) { + keeper, ctx, mocks := testkeeper.NewCoordinatorKeeper(t) + hooks := keeper.OperatorHooks() + + operator := sdk.AccAddress("testoperator") + chainID := "test-chain" + key := testutiltx.GenerateConsensusKey() + + t.Run("Existing validator", func(t *testing.T) { + // Setup: 
Add a validator for the chain + validator, err := commontypes.NewSubscriberValidator(key.ToConsAddr(), 100, key.ToSdkKey()) + require.NoError(t, err) + keeper.SetSubscriberValidatorForChain(ctx, chainID, validator) + + // Set expectations + mocks.OperatorKeeper.EXPECT().DeleteOperatorAddressForChainIDAndConsAddr(gomock.Any(), chainID, key.ToConsAddr()).Times(0) + + // Call the hook + hooks.AfterOperatorKeyRemovalInitiated(ctx, operator, chainID, key) + + // Verify + nextVscID := keeper.GetVscIDForChain(ctx, chainID) + 1 + consAddrsToPrune := keeper.GetConsAddrsToPrune(ctx, chainID, nextVscID) + require.Contains(t, consAddrsToPrune.List, key.ToConsAddr().Bytes()) + + maturityVscID := keeper.GetMaturityVscIDForChainIDConsAddr(ctx, chainID, key.ToConsAddr()) + require.Equal(t, nextVscID, maturityVscID) + }) + + t.Run("Non-existing validator", func(t *testing.T) { + nonExistentKey := testutiltx.GenerateConsensusKey() + + // Set expectations + mocks.OperatorKeeper.EXPECT().DeleteOperatorAddressForChainIDAndConsAddr(gomock.Any(), chainID, nonExistentKey.ToConsAddr()).Times(1) + + // Call the hook + hooks.AfterOperatorKeyRemovalInitiated(ctx, operator, chainID, nonExistentKey) + + // Verify + nextVscID := keeper.GetVscIDForChain(ctx, chainID) + 1 + consAddrsToPrune := keeper.GetConsAddrsToPrune(ctx, chainID, nextVscID) + require.NotContains(t, consAddrsToPrune.List, nonExistentKey.ToConsAddr()) + }) +} diff --git a/x/appchain/coordinator/keeper/keeper.go b/x/appchain/coordinator/keeper/keeper.go index ead9bc645..bd3d95903 100644 --- a/x/appchain/coordinator/keeper/keeper.go +++ b/x/appchain/coordinator/keeper/keeper.go @@ -3,23 +3,31 @@ package keeper import ( "fmt" - "github.com/cometbft/cometbft/libs/log" - + "github.com/ExocoreNetwork/exocore/utils" commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + "github.com/cometbft/cometbft/libs/log" "github.com/cosmos/cosmos-sdk/codec" storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + host "github.com/cosmos/ibc-go/v7/modules/core/24-host" ) type Keeper struct { - cdc codec.BinaryCodec - storeKey storetypes.StoreKey - avsKeeper types.AVSKeeper - epochsKeeper types.EpochsKeeper - operatorKeeper types.OperatorKeeper - stakingKeeper types.StakingKeeper - clientKeeper commontypes.ClientKeeper + cdc codec.BinaryCodec + storeKey storetypes.StoreKey + avsKeeper types.AVSKeeper + epochsKeeper types.EpochsKeeper + operatorKeeper types.OperatorKeeper + stakingKeeper types.StakingKeeper + delegationKeeper types.DelegationKeeper + clientKeeper commontypes.ClientKeeper + portKeeper commontypes.PortKeeper + scopedKeeper commontypes.ScopedKeeper + channelKeeper commontypes.ChannelKeeper + connectionKeeper commontypes.ConnectionKeeper + accountKeeper commontypes.AccountKeeper } // NewKeeper creates a new coordinator keeper. 
@@ -30,20 +38,80 @@ func NewKeeper( epochsKeeper types.EpochsKeeper, operatorKeeper types.OperatorKeeper, stakingKeeper types.StakingKeeper, + delegationKeeper types.DelegationKeeper, clientKeeper commontypes.ClientKeeper, + portKeeper commontypes.PortKeeper, + scopedKeeper commontypes.ScopedKeeper, + channelKeeper commontypes.ChannelKeeper, + connectionKeeper commontypes.ConnectionKeeper, + accountKeeper commontypes.AccountKeeper, ) Keeper { - return Keeper{ - cdc: cdc, - storeKey: storeKey, - avsKeeper: avsKeeper, - epochsKeeper: epochsKeeper, - operatorKeeper: operatorKeeper, - stakingKeeper: stakingKeeper, - clientKeeper: clientKeeper, + k := Keeper{ + cdc: cdc, + storeKey: storeKey, + avsKeeper: avsKeeper, + epochsKeeper: epochsKeeper, + operatorKeeper: operatorKeeper, + stakingKeeper: stakingKeeper, + delegationKeeper: delegationKeeper, + clientKeeper: clientKeeper, + portKeeper: portKeeper, + scopedKeeper: scopedKeeper, + channelKeeper: channelKeeper, + connectionKeeper: connectionKeeper, + accountKeeper: accountKeeper, } + k.mustValidateFields() + return k } // Logger returns a logger object for use within the module. func (k Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) } + +// BindPort defines a wrapper function for the port Keeper's function in +// order to expose it to module's InitGenesis function +func (k Keeper) BindPort(ctx sdk.Context, portID string) error { + capability := k.portKeeper.BindPort(ctx, portID) + return k.ClaimCapability(ctx, capability, host.PortPath(portID)) +} + +// GetPort returns the portID for the IBC app module. Used in ExportGenesis +func (k Keeper) GetPort(ctx sdk.Context) string { + store := ctx.KVStore(k.storeKey) + return string(store.Get(types.PortKey())) +} + +// SetPort sets the portID for the IBC app module. Used in InitGenesis +func (k Keeper) SetPort(ctx sdk.Context, portID string) { + store := ctx.KVStore(k.storeKey) + store.Set(types.PortKey(), []byte(portID)) +} + +// ClaimCapability allows the IBC app module to claim a capability that core IBC +// passes to it +func (k Keeper) ClaimCapability( + ctx sdk.Context, cap *capabilitytypes.Capability, name string, +) error { + return k.scopedKeeper.ClaimCapability(ctx, cap, name) +} + +// mustValidateFields ensures that all the required fields are set. It does not count the number +func (k Keeper) mustValidateFields() { + // TODO: there is no way to count the number of fields here, besides using reflect, which + // fails the Linter. The developer should ensure to add the fields here when adding new fields. 
+ utils.PanicIfNil(k.storeKey, "storeKey") + utils.PanicIfNil(k.cdc, "cdc") + utils.PanicIfNil(k.avsKeeper, "avsKeeper") + utils.PanicIfNil(k.epochsKeeper, "epochsKeeper") + utils.PanicIfNil(k.operatorKeeper, "operatorKeeper") + utils.PanicIfNil(k.stakingKeeper, "stakingKeeper") + utils.PanicIfNil(k.delegationKeeper, "delegationKeeper") + utils.PanicIfNil(k.clientKeeper, "clientKeeper") + utils.PanicIfNil(k.portKeeper, "portKeeper") + utils.PanicIfNil(k.scopedKeeper, "scopedKeeper") + utils.PanicIfNil(k.channelKeeper, "channelKeeper") + utils.PanicIfNil(k.connectionKeeper, "connectionKeeper") + utils.PanicIfNil(k.accountKeeper, "accountKeeper") +} diff --git a/x/appchain/coordinator/keeper/params_test.go b/x/appchain/coordinator/keeper/params_test.go new file mode 100644 index 000000000..14b380675 --- /dev/null +++ b/x/appchain/coordinator/keeper/params_test.go @@ -0,0 +1,57 @@ +package keeper_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + testkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + epochstypes "github.com/ExocoreNetwork/exocore/x/epochs/types" +) + +func TestGetParams(t *testing.T) { + k, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + params := types.DefaultParams() + + k.SetParams(ctx, params) + + require.EqualValues(t, params, k.GetParams(ctx)) +} + +func TestSetParams(t *testing.T) { + k, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + // Create custom params + customParams := types.Params{ + TemplateClient: nil, // You may want to create a mock ClientState here + TrustingPeriodFraction: "0.8", + IBCTimeoutPeriod: 24 * time.Hour, + InitTimeoutPeriod: epochstypes.Epoch{EpochIdentifier: "day", EpochNumber: 1}, + VSCTimeoutPeriod: epochstypes.Epoch{EpochIdentifier: "day", EpochNumber: 2}, + } + + // Set custom params + k.SetParams(ctx, customParams) + + // Get params and verify they match the custom params + retrievedParams := k.GetParams(ctx) + require.EqualValues(t, customParams, retrievedParams) + + // Verify individual fields + require.Equal(t, customParams.TrustingPeriodFraction, retrievedParams.TrustingPeriodFraction) + require.Equal(t, customParams.IBCTimeoutPeriod, retrievedParams.IBCTimeoutPeriod) + require.Equal(t, customParams.InitTimeoutPeriod, retrievedParams.InitTimeoutPeriod) + require.Equal(t, customParams.VSCTimeoutPeriod, retrievedParams.VSCTimeoutPeriod) +} + +func TestGetParams_Default(t *testing.T) { + k, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + // Get params without setting them first + params := k.GetParams(ctx) + + // Verify that default params are returned + require.EqualValues(t, types.DefaultParams(), params) +} diff --git a/x/appchain/coordinator/keeper/register.go b/x/appchain/coordinator/keeper/register.go index 57076a56e..acd125072 100644 --- a/x/appchain/coordinator/keeper/register.go +++ b/x/appchain/coordinator/keeper/register.go @@ -42,7 +42,7 @@ func (k Keeper) AddSubscriberChain( UnbondingPeriod: uint64(unbondingEpochs), // estimated MinSelfDelegation: req.MinSelfDelegationUsd, EpochIdentifier: req.EpochIdentifier, - ChainID: req.ChainID, + ChainID: req.ChainID, // use the one with the version intentionally // TODO: remove the owner role and make it controllable by subscriber-governance AvsOwnerAddress: []string{req.FromAddress}, }); err != nil { diff --git a/x/appchain/coordinator/keeper/register_test.go b/x/appchain/coordinator/keeper/register_test.go new file mode 100644 index 000000000..a1c516838 --- /dev/null +++ 
b/x/appchain/coordinator/keeper/register_test.go @@ -0,0 +1,110 @@ +package keeper_test + +import ( + "errors" + "testing" + + testutil "github.com/ExocoreNetwork/exocore/testutil/keeper" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + epochstypes "github.com/ExocoreNetwork/exocore/x/epochs/types" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestKeeper_AddSubscriberChain(t *testing.T) { + k, ctx, mocks := testutil.NewCoordinatorKeeper(t) + + tests := []struct { + name string + req *types.RegisterSubscriberChainRequest + setup func() + wantErr bool + }{ + { + name: "success", + req: &types.RegisterSubscriberChainRequest{ + ChainID: "test-chain-1", + EpochIdentifier: "day", + AssetIDs: []string{"1", "2"}, + MinSelfDelegationUsd: uint64(1000), + SubscriberParams: commontypes.SubscriberParams{ + UnbondingPeriod: 100, + }, + FromAddress: "exo1...", + }, + setup: func() { + mocks.EpochsKeeper.EXPECT().GetEpochInfo(ctx, "day").Return(epochstypes.EpochInfo{ + Identifier: "day", + Duration: 86400, + CurrentEpoch: 10, + }, true) + mocks.AVSKeeper.EXPECT().RegisterAVSWithChainID(ctx, gomock.Any()).Return(common.Address{}, nil) + }, + wantErr: false, + }, + { + name: "nil request", + req: nil, + wantErr: true, + }, + { + name: "epoch not found", + req: &types.RegisterSubscriberChainRequest{ + ChainID: "test-chain-2", + EpochIdentifier: "week", + }, + setup: func() { + mocks.EpochsKeeper.EXPECT().GetEpochInfo(ctx, "week").Return(epochstypes.EpochInfo{}, false) + }, + wantErr: true, + }, + { + name: "avs registration fails", + req: &types.RegisterSubscriberChainRequest{ + ChainID: "test-chain-3", + EpochIdentifier: "day", + AssetIDs: []string{"1"}, + MinSelfDelegationUsd: uint64(1000), + SubscriberParams: commontypes.SubscriberParams{ + UnbondingPeriod: 100, + }, + FromAddress: "exo1...", + }, + setup: func() { + mocks.EpochsKeeper.EXPECT().GetEpochInfo(ctx, "day").Return(epochstypes.EpochInfo{ + Identifier: "day", + Duration: 86400, + CurrentEpoch: 10, + }, true) + mocks.AVSKeeper.EXPECT().RegisterAVSWithChainID(ctx, gomock.Any()).Return(common.Address{}, errors.New("avs registration failed")) + }, + wantErr: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if tt.setup != nil { + tt.setup() + } + + resp, err := k.AddSubscriberChain(ctx, tt.req) + + if tt.wantErr { + assert.Error(t, err) + assert.Nil(t, resp) + } else { + assert.NoError(t, err) + assert.NotNil(t, resp) + + // Check if the pending subscriber chain was appended + pendingChains := k.GetPendingSubChains(ctx, tt.req.EpochIdentifier, uint64(10)) + require.Len(t, pendingChains.List, 1) + assert.Equal(t, *tt.req, pendingChains.List[0]) + } + }) + } +} diff --git a/x/appchain/coordinator/keeper/relay.go b/x/appchain/coordinator/keeper/relay.go new file mode 100644 index 000000000..a6eab6dac --- /dev/null +++ b/x/appchain/coordinator/keeper/relay.go @@ -0,0 +1,182 @@ +package keeper + +import ( + "fmt" + + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + sdk "github.com/cosmos/cosmos-sdk/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" +) + +// OnRecvSlashPacket processes a slashing packet upon its receipt from +// the subscriber chain. 
At this point, it only handles DOWNTIME infractions. +// TODO: Design and implement EQUIVOCATION slashing. +// The returned value is a byte slice containing the acknowledgment to send to +// the sender. Otherwise, it should be an error. +func (k Keeper) OnRecvSlashPacket( + ctx sdk.Context, packet channeltypes.Packet, data commontypes.SlashPacketData, +) ([]byte, error) { + chainID, found := k.GetChainForChannel(ctx, packet.DestinationChannel) + if !found { + k.Logger(ctx).Error( + "received slash packet for unknown channel", + "channel", packet.DestinationChannel, + ) + return nil, types.ErrUnknownSubscriberChannelID.Wrapf( + "slash packet on %s", packet.DestinationChannel, + ) + } + // stateless validation + if err := data.Validate(); err != nil { + return nil, commontypes.ErrInvalidPacketData.Wrapf( + "invalid slash packet: %s", err, + ) + } + // stateful validation + if err := k.ValidateSlashPacket(ctx, chainID, data); err != nil { + k.Logger(ctx).Error( + "invalid slash packet", + "error", err, + "chainID", chainID, + "vscID", data.ValsetUpdateID, + "consensus address", fmt.Sprintf("%x", data.Validator.Address), + "infraction type", data.Infraction, + ) + return nil, commontypes.ErrInvalidPacketData.Wrapf( + "invalid slash packet %s", err, + ) + } + // TODO: handle throttling of slash packets to ensure that malicious / misconfigured + // appchains don't spam the coordinator with slash packets to produce repeated + // slashing events. When throttling is implemented, indicate to the subscriber + // that a packet wasn't handled and should be retried later. + k.HandleSlashPacket(ctx, chainID, data) + k.Logger(ctx).Info( + "slash packet received and handled", + "chainID", chainID, + "consensus address", fmt.Sprintf("%x", data.Validator.Address), + "vscID", data.ValsetUpdateID, + "infractionType", data.Infraction, + ) + + // Return result ack that the packet was handled successfully + return commontypes.SlashPacketHandledResult.Bytes(), nil +} + +// OnRecvVscMaturedPacket handles a VscMatured packet and returns a no-op result ack. +func (k Keeper) OnRecvVscMaturedPacket( + ctx sdk.Context, + packet channeltypes.Packet, + data commontypes.VscMaturedPacketData, +) error { + // check that the channel is established, panic if not + chainID, found := k.GetChainForChannel(ctx, packet.DestinationChannel) + if !found { + // VSCMatured packet was sent on a channel different than any of the established + // channels; this should never happen + k.Logger(ctx).Error( + "VscMaturedPacket received on unknown channel", + "channelID", packet.DestinationChannel, + ) + return types.ErrUnknownSubscriberChannelID.Wrapf( + "vsc matured packet on %s", packet.DestinationChannel, + ) + } + + k.HandleVscMaturedPacket(ctx, chainID, data) + + k.Logger(ctx).Info( + "VscMaturedPacket handled", + "chainID", chainID, + "vscID", data.ValsetUpdateID, + ) + + return nil +} + +// HandleVscMaturedPacket handles a VscMatured packet. +func (k Keeper) HandleVscMaturedPacket( + sdk.Context, string, commontypes.VscMaturedPacketData, +) { + // records := k.GetUndelegationsToMature(ctx, chainID, data.ValsetUpdateID) + // // this is matured at EndBlock, because the delegation keeper only releases the funds + // // at EndBlock. it is pointless to mature any of these now. + // // do note that this is the reason that the EndBlocker of this module is triggered + // // before that of the undelegation module. 
+	// k.AppendMaturedUndelegations(ctx, records)
+	// k.ClearUndelegationsToMature(ctx, chainID, data.ValsetUpdateID)
+
+	// operators := k.GetOptOutsToFinish(ctx, chainID, data.ValsetUpdateID)
+	// k.AppendFinishedOptOutsForChainID(ctx, chainID, operators)
+	// k.ClearOptOutsToFinish(ctx, chainID, data.ValsetUpdateID)
+
+	// // if there are any opt outs, the key can be removed. similarly,
+	// // if there are any key replacements, the old key should be pruned
+	// addrs := k.GetConsensusKeysToPrune(ctx, chainID, data.ValsetUpdateID)
+	// for _, addr := range addrs {
+	// 	// this is pruned immediately so that an operator may reuse the same key immediately
+	// 	k.Logger(ctx).Debug("pruning key", "addr", addr, "chainId", chainID)
+	// 	k.operatorKeeper.DeleteOperatorAddressForChainIDAndConsAddr(ctx, chainID, addr)
+	// }
+	// k.ClearConsensusKeysToPrune(ctx, chainID, data.ValsetUpdateID)
+}
+
+// OnAcknowledgementPacket handles acknowledgments for VSC packets sent by the coordinator
+func (k Keeper) OnAcknowledgementPacket(
+	ctx sdk.Context,
+	packet channeltypes.Packet,
+	ack channeltypes.Acknowledgement,
+) error {
+	if err := ack.GetError(); err != "" {
+		k.Logger(ctx).Error(
+			"recv ErrorAcknowledgement",
+			"channelID", packet.SourceChannel,
+			"error", err,
+		)
+		// the coordinator sent this packet, so the chain is keyed by the source channel
+		if chainID, ok := k.GetChainForChannel(ctx, packet.SourceChannel); ok {
+			return k.StopSubscriberChain(ctx, chainID, false)
+		}
+		return types.ErrUnknownSubscriberChannelID.Wrapf(
+			"ack packet on %s", packet.SourceChannel,
+		)
+	}
+	return nil
+}
+
+// OnTimeoutPacket errors if no chain is registered for the packet's source channel;
+// otherwise it stops that chain
+func (k Keeper) OnTimeoutPacket(ctx sdk.Context, packet channeltypes.Packet) error {
+	chainID, found := k.GetChainForChannel(ctx, packet.SourceChannel)
+	if !found {
+		k.Logger(ctx).Error(
+			"packet timeout, unknown channel",
+			"channelID", packet.SourceChannel,
+		)
+		return types.ErrUnknownSubscriberChannelID.Wrapf(
+			"timeout packet on %s", packet.SourceChannel,
+		)
+	}
+	// stop chain and release unbondings
+	k.Logger(ctx).Info(
+		"packet timeout, removing the subscriber",
+		"chainID", chainID,
+	)
+	return k.StopSubscriberChain(ctx, chainID, false)
+}
+
+// StopSubscriberChain stops the subscriber chain and releases any unbondings.
+// During the stoppage, it will prune any information that is no longer needed
+// to save space.
+// The closeChannel flag indicates whether the channel should be closed.
+func (k Keeper) StopSubscriberChain( + ctx sdk.Context, chainID string, closeChannel bool, +) error { + k.Logger(ctx).Info( + "stopping subscriber chain", + "chainID", chainID, + "closeChannel", closeChannel, + ) + // not yet implemented + return nil +} diff --git a/x/appchain/coordinator/keeper/relay_test.go b/x/appchain/coordinator/keeper/relay_test.go new file mode 100644 index 000000000..e3dec583e --- /dev/null +++ b/x/appchain/coordinator/keeper/relay_test.go @@ -0,0 +1,126 @@ +package keeper_test + +import ( + "testing" + + abci "github.com/cometbft/cometbft/abci/types" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + keepertest "github.com/ExocoreNetwork/exocore/testutil/keeper" + "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + coordinatortypes "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" +) + +func TestOnRecvSlashPacket(t *testing.T) { + k, ctx, mocks := keepertest.NewCoordinatorKeeper(t) + + testCases := []struct { + name string + setupMocks func() + packet channeltypes.Packet + data types.SlashPacketData + expectedError error + }{ + { + name: "unknown channel", + setupMocks: func() {}, + packet: channeltypes.Packet{DestinationChannel: "unknown-channel"}, + data: types.SlashPacketData{}, + expectedError: coordinatortypes.ErrUnknownSubscriberChannelID.Wrapf( + "slash packet on unknown-channel", + ), + }, + { + name: "invalid packet data", + setupMocks: func() { + k.SetChainForChannel(ctx, "channel-0", "chain-0") + // mocks.ChannelKeeper.EXPECT().GetChannel(gomock.Any(), gomock.Any(), gomock.Any()).Return(channeltypes.Channel{}, true) + }, + packet: channeltypes.Packet{DestinationChannel: "channel-0"}, + data: types.SlashPacketData{}, + expectedError: types.ErrInvalidPacketData.Wrapf( + "invalid slash packet: empty validator address", + ), + }, + { + name: "successful slash packet", + setupMocks: func() { + k.SetChainForChannel(ctx, "channel-0", "chain-0") + k.MapHeightToChainVscID(ctx, "chain-0", 1, 1) + mocks.OperatorKeeper.EXPECT().GetOperatorAddressForChainIDAndConsAddr(gomock.Any(), gomock.Any(), gomock.Any()).Return(true, sdk.AccAddress([]byte("hello"))).Times(2) + mocks.AVSKeeper.EXPECT().IsAVSByChainID(gomock.Any(), gomock.Any()).Return(true, avstypes.GenerateAVSAddr("chain-0")) + mocks.OperatorKeeper.EXPECT().ApplySlashForHeight(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + }, + packet: channeltypes.Packet{DestinationChannel: "channel-0"}, + data: *types.NewSlashPacketData( + abci.Validator{Address: []byte("validator"), Power: 1}, 1, stakingtypes.Infraction_INFRACTION_DOWNTIME, + ), + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.setupMocks() + result, err := k.OnRecvSlashPacket(ctx, tc.packet, tc.data) + if tc.expectedError != nil { + require.Error(t, err) + require.ErrorIs(t, err, tc.expectedError) + } else { + require.NoError(t, err) + require.Equal(t, types.SlashPacketHandledResult.Bytes(), result) + } + }) + } +} + +func TestOnRecvVscMaturedPacket(t *testing.T) { + k, ctx, mocks := keepertest.NewCoordinatorKeeper(t) + _ = mocks + + testCases := []struct { + name string + setupMocks func() + packet channeltypes.Packet + data types.VscMaturedPacketData + 
expectedError error + }{ + { + name: "unknown channel", + setupMocks: func() {}, + packet: channeltypes.Packet{DestinationChannel: "unknown-channel"}, + data: types.VscMaturedPacketData{}, + expectedError: coordinatortypes.ErrUnknownSubscriberChannelID.Wrapf( + "vsc matured packet on unknown-channel", + ), + }, + { + name: "successful vsc matured packet", + setupMocks: func() { + k.SetChainForChannel(ctx, "channel-0", "chain-0") + // mocks.ChannelKeeper.EXPECT().GetChannel(gomock.Any(), gomock.Any(), gomock.Any()).Return(channeltypes.Channel{}, true) + }, + packet: channeltypes.Packet{DestinationChannel: "channel-0"}, + data: types.VscMaturedPacketData{ValsetUpdateID: 1}, + expectedError: nil, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.setupMocks() + err := k.OnRecvVscMaturedPacket(ctx, tc.packet, tc.data) + if tc.expectedError != nil { + require.Error(t, err) + require.ErrorIs(t, err, tc.expectedError) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/x/appchain/coordinator/keeper/slash.go b/x/appchain/coordinator/keeper/slash.go index 5f4f6cb61..c94d65d2d 100644 --- a/x/appchain/coordinator/keeper/slash.go +++ b/x/appchain/coordinator/keeper/slash.go @@ -3,12 +3,109 @@ package keeper import ( "time" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" types "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" sdk "github.com/cosmos/cosmos-sdk/types" ) -// TODO: this file should be in the x/avs keeper instead. +// ValidateSlashPacket validates a slashing packet. It checks that +// (1) the valset update id maps back to an Exocore height, and +// (2) the validator (cons) address maps back to an operator account address. +// The caller must perform stateless validation by themselves. +func (k Keeper) ValidateSlashPacket( + ctx sdk.Context, chainID string, data commontypes.SlashPacketData, +) error { + // the validator set is generated at each epoch and following each slash. even if the set + // does not change at each epoch, we send an empty update anyway since, otherwise, the + // subscriber could time out the coordinator. + if k.GetHeightForChainVscID(ctx, chainID, data.ValsetUpdateID) == 0 { + return commontypes.ErrInvalidPacketData.Wrapf( + "invalid chainID %s valsetUpdateID %d", chainID, data.ValsetUpdateID, + ) + } + // the second step is to find the operator account address against the consensus address + // of the validator. if the operator is not found, the slashing packet is invalid. + // note that this works even if the operator changes their consensus key, as long as the + // key hasn't yet been pruned from the operator module. + if found, _ := k.operatorKeeper.GetOperatorAddressForChainIDAndConsAddr( + ctx, chainID, sdk.ConsAddress(data.Validator.Address), + ); !found { + // don't bech32 encode it in the error since the appchain may have a different prefix + return commontypes.ErrInvalidPacketData.Wrapf( + "operator not found %x", data.Validator.Address, + ) + } + return nil +} + +// HandleSlashPacket handles a slashing packet. The caller must ensure that the slashing packet +// is valid before calling this function. The function forwards the slashing request to the +// operator module, which will trigger a slashing hook and thus a validator set update containing +// the slashing acknowledgment. 
+func (k Keeper) HandleSlashPacket(ctx sdk.Context, chainID string, data commontypes.SlashPacketData) { + consAddress := sdk.ConsAddress(data.Validator.Address) + // never 0, since already validated + height := k.GetHeightForChainVscID(ctx, chainID, data.ValsetUpdateID) + // guaranteed to exist, since already validated + _, operatorAccAddress := k.operatorKeeper.GetOperatorAddressForChainIDAndConsAddr( + ctx, chainID, consAddress, + ) + slashProportion := k.GetSubSlashFractionDowntime(ctx, chainID) + // #nosec G703 // already validated + slashProportionDecimal, _ := sdk.NewDecFromStr(slashProportion) + jailDuration := k.GetSubDowntimeJailDuration(ctx, chainID) + _, avsAddress := k.avsKeeper.IsAVSByChainID(ctx, chainID) + // the slashing hook should trigger a validator set update for all affected AVSs. since the `chainID` is one of them + // we should make sure we are well set up for that update. we will include an ack of the slash packet in the next + // validator set update; record that here. + k.AppendSlashAck(ctx, chainID, consAddress) + if err := k.operatorKeeper.ApplySlashForHeight( + ctx, operatorAccAddress, avsAddress, height, + slashProportionDecimal, data.Infraction, jailDuration, + ); err != nil { + k.Logger(ctx).Error( + "failed to apply slash for height", + "chainID", chainID, "height", height, "consAddress", consAddress, "error", err, + ) + } +} + +// AppendSlashAck appends a slashing acknowledgment for a chain, to be sent in the next validator set update. +func (k Keeper) AppendSlashAck(ctx sdk.Context, chainID string, consAddress sdk.ConsAddress) { + prev := k.GetSlashAcks(ctx, chainID) + prev.List = append(prev.List, consAddress) + k.SetSlashAcks(ctx, chainID, prev) +} + +// GetSlashAcks gets the slashing acknowledgments for a chain, to be sent in the next validator set update. +func (k Keeper) GetSlashAcks(ctx sdk.Context, chainID string) types.ConsensusAddresses { + store := ctx.KVStore(k.storeKey) + consAddresses := types.ConsensusAddresses{} + key := types.SlashAcksKey(chainID) + value := store.Get(key) + k.cdc.MustUnmarshal(value, &consAddresses) + return consAddresses +} + +// SetSlashAcks sets the slashing acknowledgments for a chain, to be sent in the next validator set update. +func (k Keeper) SetSlashAcks(ctx sdk.Context, chainID string, consAddresses types.ConsensusAddresses) { + store := ctx.KVStore(k.storeKey) + key := types.SlashAcksKey(chainID) + if len(consAddresses.List) == 0 { + store.Delete(key) + } else { + store.Set(key, k.cdc.MustMarshal(&consAddresses)) + } +} + +// ConsumeSlashAcks consumes the slashing acknowledgments for a chain, to be sent in the next validator set update. +func (k Keeper) ConsumeSlashAcks(ctx sdk.Context, chainID string) [][]byte { + ret := k.GetSlashAcks(ctx, chainID) + k.SetSlashAcks(ctx, chainID, types.ConsensusAddresses{}) + return ret.List +} +// TODO: these fields should be in the AVS keeper instead. 
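
The slash acknowledgments recorded above accumulate per chain and are drained exactly once; presumably they ride along with the next validator set update so the subscriber learns the slash request was processed. A short usage sketch (illustrative only; the two keeper calls are from this patch, the flow around them is an assumption):

```go
package keeper

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
)

// exampleSlashAckFlow is a hypothetical snippet, not part of this patch.
func exampleSlashAckFlow(ctx sdk.Context, k Keeper, chainID string, consAddr sdk.ConsAddress) {
	// recorded while a slash packet from the subscriber is handled
	k.AppendSlashAck(ctx, chainID, consAddr)

	// drained when the next validator set update for the chain is assembled
	acks := k.ConsumeSlashAcks(ctx, chainID)
	_ = acks // e.g. attached to the outgoing update packet

	// a second consume yields an empty list, since the stored entry was cleared
	_ = k.ConsumeSlashAcks(ctx, chainID)
}
```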
// SetSubSlashFractionDowntime sets the sub slash fraction downtime for a chain func (k Keeper) SetSubSlashFractionDowntime(ctx sdk.Context, chainID string, fraction string) { store := ctx.KVStore(k.storeKey) diff --git a/x/appchain/coordinator/keeper/slash_test.go b/x/appchain/coordinator/keeper/slash_test.go new file mode 100644 index 000000000..3474003b4 --- /dev/null +++ b/x/appchain/coordinator/keeper/slash_test.go @@ -0,0 +1,156 @@ +package keeper_test + +import ( + "testing" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + testkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" + abci "github.com/cometbft/cometbft/abci/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +func TestKeeper_ValidateSlashPacket(t *testing.T) { + k, ctx, mocks := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + valsetUpdateID := uint64(1) + validatorAddress := []byte("validator-address") + + // Set up mock expectations + mocks.AVSKeeper.EXPECT().IsAVSByChainID(gomock.Any(), chainID).Return( + true, avstypes.GenerateAVSAddr(chainID), + ).AnyTimes() + mocks.OperatorKeeper.EXPECT().GetOperatorAddressForChainIDAndConsAddr( + gomock.Any(), chainID, sdk.ConsAddress(validatorAddress), + ).Return(true, sdk.AccAddress{}).AnyTimes() + + t.Run("valid packet", func(t *testing.T) { + k.MapHeightToChainVscID(ctx, chainID, valsetUpdateID, 100) + + data := commontypes.NewSlashPacketData( + abci.Validator{ + Address: validatorAddress, + }, + valsetUpdateID, + stakingtypes.Infraction_INFRACTION_DOWNTIME, + ) + + err := k.ValidateSlashPacket(ctx, chainID, *data) + require.NoError(t, err) + }) + + t.Run("invalid valset update ID", func(t *testing.T) { + data := commontypes.NewSlashPacketData( + abci.Validator{ + Address: validatorAddress, + }, + 999, + stakingtypes.Infraction_INFRACTION_DOWNTIME, + ) + + err := k.ValidateSlashPacket(ctx, chainID, *data) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid chainID") + }) + +} + +func TestKeeper_HandleSlashPacket(t *testing.T) { + k, ctx, mocks := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + valsetUpdateID := uint64(1) + validatorAddress := []byte("validator-address") + operatorAddress := sdk.AccAddress("operator-address") + avsAddress := avstypes.GenerateAVSAddr(chainID) + + // Set up mock expectations + mocks.AVSKeeper.EXPECT().IsAVSByChainID(gomock.Any(), chainID).Return(true, avsAddress).AnyTimes() + mocks.OperatorKeeper.EXPECT().GetOperatorAddressForChainIDAndConsAddr( + gomock.Any(), chainID, sdk.ConsAddress(validatorAddress), + ).Return(true, operatorAddress).AnyTimes() + mocks.OperatorKeeper.EXPECT().ApplySlashForHeight( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(nil).AnyTimes() + + t.Run("handle slash packet", func(t *testing.T) { + k.MapHeightToChainVscID(ctx, chainID, valsetUpdateID, 100) + k.SetSubSlashFractionDowntime(ctx, chainID, "0.1") + k.SetSubDowntimeJailDuration(ctx, chainID, time.Hour) + + data := commontypes.NewSlashPacketData( + abci.Validator{ + Address: validatorAddress, + }, + valsetUpdateID, + stakingtypes.Infraction_INFRACTION_DOWNTIME, + ) + + k.HandleSlashPacket(ctx, chainID, *data) + + // Check if slash ack was appended + slashAcks := k.GetSlashAcks(ctx, chainID) + require.Len(t, slashAcks.List, 
1)
+		require.Equal(t, sdk.ConsAddress(validatorAddress).Bytes(), slashAcks.List[0])
+	})
+}
+
+func TestKeeper_SlashAcks(t *testing.T) {
+	k, ctx, _ := testkeeper.NewCoordinatorKeeper(t)
+
+	chainID := "test-chain"
+	consAddress1 := sdk.ConsAddress("validator1")
+	consAddress2 := sdk.ConsAddress("validator2")
+
+	t.Run("append and get slash acks", func(t *testing.T) {
+		k.AppendSlashAck(ctx, chainID, consAddress1)
+		k.AppendSlashAck(ctx, chainID, consAddress2)
+
+		slashAcks := k.GetSlashAcks(ctx, chainID)
+		require.Len(t, slashAcks.List, 2)
+		require.Equal(t, consAddress1.Bytes(), slashAcks.List[0])
+		require.Equal(t, consAddress2.Bytes(), slashAcks.List[1])
+	})
+
+	t.Run("consume slash acks", func(t *testing.T) {
+		consumedAcks := k.ConsumeSlashAcks(ctx, chainID)
+		require.Len(t, consumedAcks, 2)
+		require.Equal(t, consAddress1.Bytes(), consumedAcks[0])
+		require.Equal(t, consAddress2.Bytes(), consumedAcks[1])
+
+		// Check that acks were cleared
+		remainingAcks := k.GetSlashAcks(ctx, chainID)
+		require.Empty(t, remainingAcks.List)
+	})
+}
+
+func TestKeeper_SubSlashFractionAndJailDuration(t *testing.T) {
+	k, ctx, _ := testkeeper.NewCoordinatorKeeper(t)
+
+	chainID := "test-chain"
+
+	t.Run("set and get sub slash fraction downtime", func(t *testing.T) {
+		k.SetSubSlashFractionDowntime(ctx, chainID, "0.1")
+		fraction := k.GetSubSlashFractionDowntime(ctx, chainID)
+		require.Equal(t, "0.1", fraction)
+	})
+
+	t.Run("set and get sub slash fraction double sign", func(t *testing.T) {
+		k.SetSubSlashFractionDoubleSign(ctx, chainID, "0.5")
+		fraction := k.GetSubSlashFractionDoubleSign(ctx, chainID)
+		require.Equal(t, "0.5", fraction)
+	})
+
+	t.Run("set and get sub downtime jail duration", func(t *testing.T) {
+		duration := 24 * time.Hour
+		k.SetSubDowntimeJailDuration(ctx, chainID, duration)
+		retrievedDuration := k.GetSubDowntimeJailDuration(ctx, chainID)
+		require.Equal(t, duration, retrievedDuration)
+	})
+}
diff --git a/x/appchain/coordinator/keeper/timeout.go b/x/appchain/coordinator/keeper/timeout.go
index d0a764a91..999be6230 100644
--- a/x/appchain/coordinator/keeper/timeout.go
+++ b/x/appchain/coordinator/keeper/timeout.go
@@ -33,6 +33,10 @@ func (k Keeper) SetChainsToInitTimeout(
 	ctx sdk.Context, epoch epochstypes.Epoch, chains types.ChainIDs,
 ) {
 	store := ctx.KVStore(k.storeKey)
+	if len(chains.List) == 0 {
+		store.Delete(types.InitTimeoutEpochKey(epoch))
+		return
+	}
 	bz := k.cdc.MustMarshal(&chains)
 	store.Set(types.InitTimeoutEpochKey(epoch), bz)
 }
@@ -52,11 +56,118 @@ func (k Keeper) RemoveChainFromInitTimeout(
 	k.SetChainsToInitTimeout(ctx, epoch, prev)
 }
 
-// ClearChainsToInitTimeout clears the list of chains which will timeout (if not initialized by then)
-// at the end of the epoch.
-func (k Keeper) ClearChainsToInitTimeout(
-	ctx sdk.Context, epoch epochstypes.Epoch,
+// SetChainInitTimeout stores a lookup from chain to the epoch by the end of which
+// it must be initialized.
+func (k Keeper) SetChainInitTimeout(
+	ctx sdk.Context, chainID string, epoch epochstypes.Epoch,
+) {
+	store := ctx.KVStore(k.storeKey)
+	store.Set(types.ChainInitTimeoutKey(chainID), k.cdc.MustMarshal(&epoch))
+}
+
+// GetChainInitTimeout returns the epoch by the end of which the chain must be initialized.
+func (k Keeper) GetChainInitTimeout( + ctx sdk.Context, chainID string, +) (epoch epochstypes.Epoch, found bool) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(types.ChainInitTimeoutKey(chainID)) + if bz == nil { + return epoch, false + } + k.cdc.MustUnmarshal(bz, &epoch) + return epoch, true +} + +// DeleteChainInitTimeout deletes the lookup from chain to the epoch by the end of which +// it must be initialized. +func (k Keeper) DeleteChainInitTimeout(ctx sdk.Context, chainID string) { + store := ctx.KVStore(k.storeKey) + store.Delete(types.ChainInitTimeoutKey(chainID)) +} + +// SetVscTimeout stores the epoch by the end of which a response to a VSC must be received. +func (k Keeper) SetVscTimeout( + ctx sdk.Context, chainID string, vscID uint64, timeout epochstypes.Epoch, ) { store := ctx.KVStore(k.storeKey) - store.Delete(types.InitTimeoutEpochKey(epoch)) + store.Set(types.VscTimeoutKey(chainID, vscID), k.cdc.MustMarshal(&timeout)) +} + +// GetVscTimeout returns the epoch by the end of which a response to a VSC must be received. +func (k Keeper) GetVscTimeout( + ctx sdk.Context, chainID string, vscID uint64, +) (timeout epochstypes.Epoch, found bool) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(types.VscTimeoutKey(chainID, vscID)) + if bz == nil { + return timeout, false + } + k.cdc.MustUnmarshal(bz, &timeout) + return timeout, true +} + +// GetFirstVscTimeout returns the first epoch by the end of which a response to a VSC must be received. +func (k Keeper) GetFirstVscTimeout( + ctx sdk.Context, chainID string, +) (timeout epochstypes.Epoch, vscID uint64, found bool) { + store := ctx.KVStore(k.storeKey) + partialKey := append( + []byte{types.VscTimeoutBytePrefix}, + []byte(chainID)..., + ) + iterator := sdk.KVStorePrefixIterator(store, partialKey) + defer iterator.Close() + + if iterator.Valid() { + _, vscID, err := types.ParseVscTimeoutKey(iterator.Key()) + if err != nil { + return timeout, 0, false + } + bz := iterator.Value() + k.cdc.MustUnmarshal(bz, &timeout) + return timeout, vscID, true + } + return timeout, 0, false +} + +// DeleteVscTimeout deletes the epoch by the end of which a response to a VSC must be received. +func (k Keeper) DeleteVscTimeout(ctx sdk.Context, chainID string, vscID uint64) { + store := ctx.KVStore(k.storeKey) + store.Delete(types.VscTimeoutKey(chainID, vscID)) +} + +// RemoveTimedoutSubscribers removes the subscribers that are timed out at the end of the current epoch. +// epochNumber passed is the current epoch number, which is ending. 
+func (k Keeper) RemoveTimedoutSubscribers(ctx sdk.Context, epochIdentifier string, epochNumber int64) {
+	// init timeout chains
+	epoch := epochstypes.NewEpoch(uint64(epochNumber), epochIdentifier)
+	chains := k.GetChainsToInitTimeout(ctx, epoch)
+	for _, chainID := range chains.List {
+		if err := k.StopSubscriberChain(ctx, chainID, true); err != nil {
+			k.Logger(ctx).Error("failed to stop subscriber chain", "chainID", chainID, "error", err)
+			continue
+		}
+		k.DeleteChainInitTimeout(ctx, chainID) // prune
+	}
+	// vsc timeout chains
+	vscChains := k.GetAllChainsWithChannels(ctx)
+	for _, chainID := range vscChains {
+		timeout, vscID, found := k.GetFirstVscTimeout(ctx, chainID)
+		if !found {
+			continue
+		}
+		if timeout.EpochIdentifier == epochIdentifier && timeout.EpochNumber <= uint64(epochNumber) {
+			k.Logger(ctx).Info(
+				"VSC timed out, removing subscriber",
+				"chainID", chainID,
+				"epochIdentifier", timeout.EpochIdentifier,
+				"epochNumber", timeout.EpochNumber,
+			)
+			if err := k.StopSubscriberChain(ctx, chainID, true); err != nil {
+				k.Logger(ctx).Error("failed to stop subscriber chain", "chainID", chainID, "error", err)
+				continue
+			}
+			k.DeleteVscTimeout(ctx, chainID, vscID) // prune
+		}
+	}
 }
diff --git a/x/appchain/coordinator/keeper/timeout_test.go b/x/appchain/coordinator/keeper/timeout_test.go
new file mode 100644
index 000000000..34e990ca3
--- /dev/null
+++ b/x/appchain/coordinator/keeper/timeout_test.go
@@ -0,0 +1,123 @@
+package keeper_test
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	testkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper"
+	epochstypes "github.com/ExocoreNetwork/exocore/x/epochs/types"
+)
+
+func TestAppendAndGetChainsToInitTimeout(t *testing.T) {
+	k, ctx, _ := testkeeper.NewCoordinatorKeeper(t)
+	epoch := epochstypes.NewEpoch(1, "test")
+	chainID := "test-chain"
+
+	// Append chain to init timeout
+	k.AppendChainToInitTimeout(ctx, epoch, chainID)
+	k.SetChainInitTimeout(ctx, chainID, epoch)
+
+	// Get chains to init timeout
+	chains := k.GetChainsToInitTimeout(ctx, epoch)
+	require.Equal(t, 1, len(chains.List))
+	require.Equal(t, chainID, chains.List[0])
+	gotEpoch, found := k.GetChainInitTimeout(ctx, chainID)
+	require.True(t, found)
+	require.Equal(t, epoch, gotEpoch)
+}
+
+func TestRemoveChainFromInitTimeout(t *testing.T) {
+	k, ctx, _ := testkeeper.NewCoordinatorKeeper(t)
+	epoch := epochstypes.NewEpoch(1, "test")
+	chainID1 := "test-chain-1"
+	chainID2 := "test-chain-2"
+
+	// Append chains to init timeout
+	k.AppendChainToInitTimeout(ctx, epoch, chainID1)
+	k.SetChainInitTimeout(ctx, chainID1, epoch)
+	k.AppendChainToInitTimeout(ctx, epoch, chainID2)
+	k.SetChainInitTimeout(ctx, chainID2, epoch)
+
+	// Remove one chain
+	k.RemoveChainFromInitTimeout(ctx, epoch, chainID1)
+	k.DeleteChainInitTimeout(ctx, chainID1)
+
+	// Check remaining chains
+	chains := k.GetChainsToInitTimeout(ctx, epoch)
+	require.Equal(t, 1, len(chains.List))
+	require.Equal(t, chainID2, chains.List[0])
+	_, found := k.GetChainInitTimeout(ctx, chainID1)
+	require.False(t, found)
+	gotEpoch, found := k.GetChainInitTimeout(ctx, chainID2)
+	require.True(t, found)
+	require.Equal(t, epoch, gotEpoch)
+}
+
+func TestSetAndGetVscTimeout(t *testing.T) {
+	k, ctx, _ := testkeeper.NewCoordinatorKeeper(t)
+	chainID := "test-chain"
+	vscID := uint64(1)
+	timeout := epochstypes.NewEpoch(2, "test")
+
+	// Set VSC timeout
+	k.SetVscTimeout(ctx, chainID, vscID, timeout)
+
+	// Get VSC timeout
+	storedTimeout,
found := k.GetVscTimeout(ctx, chainID, vscID) + require.True(t, found) + require.Equal(t, timeout, storedTimeout) + + // Delete VSC timeout + k.DeleteVscTimeout(ctx, chainID, vscID) + + // Check if deleted + _, found = k.GetVscTimeout(ctx, chainID, vscID) + require.False(t, found) +} + +func TestGetFirstVscTimeout(t *testing.T) { + k, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + chainID := "test-chain" + timeout1 := epochstypes.NewEpoch(2, "test") + timeout2 := epochstypes.NewEpoch(3, "test") + + // Set multiple VSC timeouts + k.SetVscTimeout(ctx, chainID, 1, timeout1) + k.SetVscTimeout(ctx, chainID, 2, timeout2) + + // Get first VSC timeout + firstTimeout, vscID, found := k.GetFirstVscTimeout(ctx, chainID) + require.True(t, found) + require.Equal(t, timeout1, firstTimeout) + require.Equal(t, uint64(1), vscID) +} + +func TestRemoveTimedoutSubscribers(t *testing.T) { + k, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + epochIdentifier := "test" + epochNumber := int64(5) + + chainID1 := "test-chain-1" + chainID2 := "test-chain-2" + channelID := "channel-0" + + // Set up test data + epoch := epochstypes.NewEpoch(uint64(epochNumber), epochIdentifier) + k.AppendChainToInitTimeout(ctx, epoch, chainID1) + + // channel must be set for vsc timeout to occur, since packets are only sent over channels + k.SetChannelForChain(ctx, chainID2, channelID) + k.SetChainForChannel(ctx, channelID, chainID2) + k.SetVscTimeout(ctx, chainID2, 1, epoch) + + // Call RemoveTimedoutSubscribers + k.RemoveTimedoutSubscribers(ctx, epochIdentifier, epochNumber) + + // Verify results + _, found := k.GetChainInitTimeout(ctx, chainID1) + require.False(t, found) + + _, found = k.GetVscTimeout(ctx, chainID2, 1) + require.False(t, found) +} diff --git a/x/appchain/coordinator/keeper/unbonding.go b/x/appchain/coordinator/keeper/unbonding.go new file mode 100644 index 000000000..239cb2a24 --- /dev/null +++ b/x/appchain/coordinator/keeper/unbonding.go @@ -0,0 +1,108 @@ +package keeper + +import ( + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// AppendConsAddrToPrune appends a consensus address to the list of consensus addresses +// that will be pruned when the validator set update containing the given vscID is matured +// by the chainID. +func (k Keeper) AppendConsAddrToPrune( + ctx sdk.Context, chainID string, vscID uint64, consKey sdk.ConsAddress, +) { + prev := k.GetConsAddrsToPrune(ctx, chainID, vscID) + prev.List = append(prev.List, consKey) + k.SetConsAddrsToPrune(ctx, chainID, vscID, prev) +} + +// GetConsAddrsToPrune returns the list of consensus addresses that will be pruned when the +// validator set update containing the given vscID is matured by the chainID. +func (k Keeper) GetConsAddrsToPrune( + ctx sdk.Context, chainID string, vscID uint64, +) (res types.ConsensusAddresses) { + store := ctx.KVStore(k.storeKey) + key := types.ConsAddrsToPruneKey(chainID, vscID) + k.cdc.MustUnmarshal(store.Get(key), &res) + return res +} + +// SetConsAddrsToPrune sets the list of consensus addresses that will be pruned when the +// validator set update containing the given vscID is matured by the chainID. 
+func (k Keeper) SetConsAddrsToPrune( + ctx sdk.Context, chainID string, vscID uint64, addrs types.ConsensusAddresses, +) { + store := ctx.KVStore(k.storeKey) + key := types.ConsAddrsToPruneKey(chainID, vscID) + if len(addrs.List) == 0 { + store.Delete(key) + return + } + store.Set(key, k.cdc.MustMarshal(&addrs)) +} + +// SetMaturityVscIDForChainIDConsAddr sets the vscID for the given chainID and consensus address. +// When the vscID matures on the chainID, the consensus address will be pruned. +func (k Keeper) SetMaturityVscIDForChainIDConsAddr( + ctx sdk.Context, chainID string, consAddr sdk.ConsAddress, vscID uint64, +) { + store := ctx.KVStore(k.storeKey) + key := types.MaturityVscIDForChainIDConsAddrKey(chainID, consAddr) + store.Set(key, sdk.Uint64ToBigEndian(vscID)) +} + +// GetMaturityVscIDForChainIDConsAddr returns the vscID for the given chainID and consensus address. +// The vscID is used to prune the consensus address when the vscID matures on the chainID. +func (k Keeper) GetMaturityVscIDForChainIDConsAddr( + ctx sdk.Context, chainID string, consAddr sdk.ConsAddress, +) uint64 { + store := ctx.KVStore(k.storeKey) + key := types.MaturityVscIDForChainIDConsAddrKey(chainID, consAddr) + bz := store.Get(key) + return sdk.BigEndianToUint64(bz) +} + +// DeleteMaturityVscIDForChainIDConsAddr deletes the vscID for the given chainID and consensus address. +// The vscID is used to prune the consensus address when the vscID matures on the chainID. +func (k Keeper) DeleteMaturityVscIDForChainIDConsAddr( + ctx sdk.Context, chainID string, consAddr sdk.ConsAddress, +) { + store := ctx.KVStore(k.storeKey) + key := types.MaturityVscIDForChainIDConsAddrKey(chainID, consAddr) + store.Delete(key) +} + +// AppendUndelegationToRelease appends an undelegation record to the list of undelegations to release +// when the validator set update containing the given vscID is matured by the chainID. +func (k Keeper) AppendUndelegationToRelease( + ctx sdk.Context, chainID string, vscID uint64, recordKey []byte, +) { + prev := k.GetUndelegationsToRelease(ctx, chainID, vscID) + prev.List = append(prev.List, recordKey) + k.SetUndelegationsToRelease(ctx, chainID, vscID, prev) +} + +// GetUndelegationsToRelease returns the list of undelegations to release when the validator set update +// containing the given vscID is matured by the chainID. +func (k Keeper) GetUndelegationsToRelease( + ctx sdk.Context, chainID string, vscID uint64, +) (res types.UndelegationRecordKeys) { + store := ctx.KVStore(k.storeKey) + key := types.UndelegationsToReleaseKey(chainID, vscID) + k.cdc.MustUnmarshal(store.Get(key), &res) + return res +} + +// SetUndelegationsToRelease sets the list of undelegations to release when the validator set update +// containing the given vscID is matured by the chainID. 
+func (k Keeper) SetUndelegationsToRelease( + ctx sdk.Context, chainID string, vscID uint64, keys types.UndelegationRecordKeys, +) { + store := ctx.KVStore(k.storeKey) + key := types.UndelegationsToReleaseKey(chainID, vscID) + if len(keys.List) == 0 { + store.Delete(key) + return + } + store.Set(key, k.cdc.MustMarshal(&keys)) +} diff --git a/x/appchain/coordinator/keeper/unbonding_test.go b/x/appchain/coordinator/keeper/unbonding_test.go new file mode 100644 index 000000000..52f523c53 --- /dev/null +++ b/x/appchain/coordinator/keeper/unbonding_test.go @@ -0,0 +1,111 @@ +package keeper_test + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + + testkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" +) + +func TestAppendGetConsAddrsToPrune(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + vscID := uint64(1) + consAddr1 := sdk.ConsAddress([]byte("consAddr1")) + consAddr2 := sdk.ConsAddress([]byte("consAddr2")) + + // Test appending and getting + keeper.AppendConsAddrToPrune(ctx, chainID, vscID, consAddr1) + keeper.AppendConsAddrToPrune(ctx, chainID, vscID, consAddr2) + + addrsToPrune := keeper.GetConsAddrsToPrune(ctx, chainID, vscID) + require.Equal(t, 2, len(addrsToPrune.List)) + require.Contains(t, addrsToPrune.List, consAddr1.Bytes()) + require.Contains(t, addrsToPrune.List, consAddr2.Bytes()) +} + +func TestSetGetConsAddrsToPrune(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + vscID := uint64(1) + consAddrs := types.ConsensusAddresses{ + List: [][]byte{ + sdk.ConsAddress([]byte("consAddr1")).Bytes(), + sdk.ConsAddress([]byte("consAddr2")).Bytes(), + }, + } + + // Test setting and getting + keeper.SetConsAddrsToPrune(ctx, chainID, vscID, consAddrs) + gotAddrs := keeper.GetConsAddrsToPrune(ctx, chainID, vscID) + require.Equal(t, consAddrs, gotAddrs) + + // Test deleting when empty + keeper.SetConsAddrsToPrune(ctx, chainID, vscID, types.ConsensusAddresses{}) + gotAddrs = keeper.GetConsAddrsToPrune(ctx, chainID, vscID) + require.Empty(t, gotAddrs.List) +} + +func TestSetGetDeleteMaturityVscIDForChainIDConsAddr(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + consAddr := sdk.ConsAddress([]byte("consAddr")) + vscID := uint64(1) + + // Test setting and getting + keeper.SetMaturityVscIDForChainIDConsAddr(ctx, chainID, consAddr, vscID) + gotVscID := keeper.GetMaturityVscIDForChainIDConsAddr(ctx, chainID, consAddr) + require.Equal(t, vscID, gotVscID) + + // Test deleting + keeper.DeleteMaturityVscIDForChainIDConsAddr(ctx, chainID, consAddr) + gotVscID = keeper.GetMaturityVscIDForChainIDConsAddr(ctx, chainID, consAddr) + require.Equal(t, uint64(0), gotVscID) +} + +func TestAppendGetUndelegationsToRelease(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + vscID := uint64(1) + recordKey1 := []byte("recordKey1") + recordKey2 := []byte("recordKey2") + + // Test appending and getting + keeper.AppendUndelegationToRelease(ctx, chainID, vscID, recordKey1) + keeper.AppendUndelegationToRelease(ctx, chainID, vscID, recordKey2) + + undelegationsToRelease := keeper.GetUndelegationsToRelease(ctx, chainID, vscID) + require.Equal(t, 2, len(undelegationsToRelease.List)) + require.Contains(t, undelegationsToRelease.List, recordKey1) + require.Contains(t, undelegationsToRelease.List, 
recordKey2) +} + +func TestSetGetUndelegationsToRelease(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + vscID := uint64(1) + undelegations := types.UndelegationRecordKeys{ + List: [][]byte{ + []byte("recordKey1"), + []byte("recordKey2"), + }, + } + + // Test setting and getting + keeper.SetUndelegationsToRelease(ctx, chainID, vscID, undelegations) + gotUndelegations := keeper.GetUndelegationsToRelease(ctx, chainID, vscID) + require.Equal(t, undelegations, gotUndelegations) + + // Test deleting when empty + keeper.SetUndelegationsToRelease(ctx, chainID, vscID, types.UndelegationRecordKeys{}) + gotUndelegations = keeper.GetUndelegationsToRelease(ctx, chainID, vscID) + require.Empty(t, gotUndelegations.List) +} diff --git a/x/appchain/coordinator/keeper/validator_set_update.go b/x/appchain/coordinator/keeper/validator_set_update.go new file mode 100644 index 000000000..e802984fb --- /dev/null +++ b/x/appchain/coordinator/keeper/validator_set_update.go @@ -0,0 +1,257 @@ +package keeper + +import ( + "fmt" + + exocoretypes "github.com/ExocoreNetwork/exocore/types/keys" + "github.com/ExocoreNetwork/exocore/utils" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" +) + +// QueueValidatorUpdatesForEpochID queues all the validator updates to be sent to the subscriber +// chains at the end of the epoch. After this function, call SendQueuedValidatorUpdates, which +// will actually send the updates. +func (k Keeper) QueueValidatorUpdatesForEpochID( + ctx sdk.Context, epochID string, epochNumber int64, +) { + // Get all the chains that need to be updated + chainIDs := k.avsKeeper.GetEpochEndChainIDs(ctx, epochID, epochNumber) + for _, chainID := range chainIDs { + cctx, writeCache, err := k.QueueValidatorUpdatesForChainIDInCachedCtx(ctx, chainID) + if err != nil { + k.Logger(ctx).Error( + "error queuing validator updates for chain", + "chainID", chainID, + "error", err, + ) + continue + } + // copy over the events from the cached ctx + ctx.EventManager().EmitEvents(cctx.EventManager().Events()) + writeCache() + } +} + +// QueueValidatorUpdatesForChainIDInCachedCtx is a wrapper function around QueueValidatorUpdatesForChainID. +func (k Keeper) QueueValidatorUpdatesForChainIDInCachedCtx( + ctx sdk.Context, chainID string, +) (cctx sdk.Context, writeCache func(), err error) { + cctx, writeCache = ctx.CacheContext() + err = k.QueueValidatorUpdatesForChainID(cctx, chainID) + return +} + +// QueueValidatorUpdatesForChainID queues all the validator updates to be sent to the subscriber, saving the +// updates as individual validators as well. +func (k Keeper) QueueValidatorUpdatesForChainID( + ctx sdk.Context, chainID string, +) error { + // Get the current validator set for the chain, which is sorted + // by the consensus address (bytes). This sorting is okay to use + prevList := k.GetAllSubscriberValidatorsForChain(ctx, chainID) + // to check whether the new set has a changed vote power, convert to map. + prevMap := make(map[string]int64, len(prevList)) + for _, val := range prevList { + // we are okay to use ConsAddress here even though the bech32 prefix + // is different, because we don't print the address. 
+ prevMap[sdk.ConsAddress(val.ConsAddress).String()] = val.Power + } + operators, keys := k.operatorKeeper.GetActiveOperatorsForChainID(ctx, chainID) + powers, err := k.operatorKeeper.GetVotePowerForChainID( + ctx, operators, chainID, + ) + if err != nil { + k.Logger(ctx).Error( + "error getting vote power for chain", + "chainID", chainID, + "error", err, + ) + // skip this chain, if consecutive failures are reported, it will eventually be + // timed out and then dropped. + return err + } + operators, keys, powers = utils.SortByPower(operators, keys, powers) + maxVals := k.GetMaxValidatorsForChain(ctx, chainID) + // double the capacity assuming that all validators are removed and an entirely new + // set of validators is added. + validatorUpdates := make([]abci.ValidatorUpdate, 0, maxVals*2) + for i := range operators { + if i >= int(maxVals) { + break + } + power := powers[i] + if power < 1 { + break + } + wrappedKey := keys[i] + addressString := wrappedKey.ToConsAddr().String() + prevPower, found := prevMap[addressString] + if found { + if prevPower != power { + validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{ + PubKey: *wrappedKey.ToTmProtoKey(), + Power: power, + }) + } + delete(prevMap, addressString) + validator, err := commontypes.NewSubscriberValidator( + wrappedKey.ToConsAddr(), power, wrappedKey.ToSdkKey(), + ) + if err != nil { + // should never happen, but just in case. + // don't skip the chain though, instead, skip the validator. + continue + } + k.SetSubscriberValidatorForChain(ctx, chainID, validator) + } else { + // new key, add it to the list. + validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{ + PubKey: *wrappedKey.ToTmProtoKey(), + Power: power, + }) + validator, err := commontypes.NewSubscriberValidator( + wrappedKey.ToConsAddr(), power, wrappedKey.ToSdkKey(), + ) + if err != nil { + // should never happen, but just in case. + // don't skip the chain though, instead, skip the validator. + continue + } + k.SetSubscriberValidatorForChain(ctx, chainID, validator) + } + } + // if there is any element in the prevList, which is still in prevMap, that element + // needs to have a vote power of 0 queued. + for _, validator := range prevList { + pubKey, err := validator.ConsPubKey() + if err != nil { + k.Logger(ctx).Error( + "error deserializing consensus public key", + "chainID", chainID, + "error", err, + ) + return err + } + wrappedKey := exocoretypes.NewWrappedConsKeyFromSdkKey(pubKey) + // alternatively, the below could be replaced by wrappedKey.ToConsAddr(), but + // since we generated this address when saving it, we can use it directly. + consAddress := sdk.ConsAddress(validator.ConsAddress) + if _, found := prevMap[consAddress.String()]; found { + validatorUpdates = append(validatorUpdates, abci.ValidatorUpdate{ + PubKey: *wrappedKey.ToTmProtoKey(), + Power: 0, + }) + k.DeleteSubscriberValidatorForChain(ctx, chainID, consAddress) + } + } + // default is 0 for the subscriber genesis. any updates will start with 1. + // increment gets the value of 0, increments it to 1, stores it and returns it. + vscID := k.IncrementVscIDForChain(ctx, chainID) + data := commontypes.NewVscPacketData( + validatorUpdates, vscID, k.ConsumeSlashAcks(ctx, chainID), + ) + k.AppendPendingVscPacket(ctx, chainID, data) + return nil +} + +// SendQueuedValidatorUpdates sends the queued validator set updates to the subscriber chains. +// It only sends them if a client + channel for that chain are set up. Otherwise, no action +// is taken. 
Since it is called immediately after queuing the updates, it is guaranteed that + only the updates from the queue (or prior) are sent. In other words, there is no possibility + that updates from a different epoch will be sent. Hence, we simply iterate over all (active) + chains. +func (k Keeper) SendQueuedValidatorUpdates(ctx sdk.Context, epochNumber int64) { + chainIDs := k.GetAllChainsWithChannels(ctx) + for _, chainID := range chainIDs { + // a channel is guaranteed to exist. + channelID, _ := k.GetChannelForChain(ctx, chainID) + packets := k.GetPendingVscPackets(ctx, chainID) + k.SendVscPacketsToChain(ctx, chainID, channelID, packets.List, epochNumber) + } +} + +// SendVscPacketsToChain sends the validator set change packets to the subscriber chain. +func (k Keeper) SendVscPacketsToChain( + ctx sdk.Context, chainID string, channelID string, + packets []commontypes.ValidatorSetChangePacketData, + epochNumber int64, +) { + params := k.GetParams(ctx) + for i := range packets { + data := packets[i] + // send packet over IBC + err := commontypes.SendIBCPacket( + ctx, + k.scopedKeeper, + k.channelKeeper, + channelID, // source channel id + commontypes.CoordinatorPortID, // source port id + commontypes.ModuleCdc.MustMarshalJSON(&data), // packet data + params.IBCTimeoutPeriod, + ) + if err != nil { + if clienttypes.ErrClientNotActive.Is(err) { + // IBC client is expired! + // leave the packet data stored to be sent once the client is upgraded + // the client cannot expire during iteration (in the middle of a block) + k.Logger(ctx).Info( + "IBC client is expired, cannot send VSC, leaving packet data stored:", + "chainID", chainID, + "vscID", data.ValsetUpdateID, + ) + return + } + // Not able to send packet over IBC! + k.Logger(ctx).Error( + "cannot send VSC, removing subscriber", + "chainID", chainID, + "vscID", data.ValsetUpdateID, + "err", err.Error(), + ) + // If this happens, most likely the subscriber is malicious; remove it + err := k.StopSubscriberChain(ctx, chainID, true) + if err != nil { + panic(fmt.Errorf("subscriber chain failed to stop: %w", err)) + } + return + } + // even when the chain's epoch identifier is `minute` and that of the `timeoutPeriod` is `hour`, + // the latter is used, because the `timeout` runs on its own schedule. + timeoutPeriod := params.VSCTimeoutPeriod + timeoutPeriod.EpochNumber += uint64(epochNumber) + 1 // 1 extra for the ended epoch + k.SetVscTimeout(ctx, chainID, data.ValsetUpdateID, timeoutPeriod) + } + k.SetPendingVscPackets(ctx, chainID, types.ValidatorSetChangePackets{}) +} + +// AppendPendingVscPacket appends a validator set change packet to the pending list, indexed by the chainID. +func (k Keeper) AppendPendingVscPacket(ctx sdk.Context, chainID string, data commontypes.ValidatorSetChangePacketData) { + prev := k.GetPendingVscPackets(ctx, chainID) + prev.List = append(prev.List, data) + k.SetPendingVscPackets(ctx, chainID, prev) +} + +// GetPendingVscPackets gets the pending validator set change packets for a chain. +func (k Keeper) GetPendingVscPackets(ctx sdk.Context, chainID string) types.ValidatorSetChangePackets { + store := ctx.KVStore(k.storeKey) + var data types.ValidatorSetChangePackets + key := types.ChainIDToVscPacketsKey(chainID) + value := store.Get(key) + k.cdc.MustUnmarshal(value, &data) + return data +} + +// SetPendingVscPackets sets the pending validator set change packets for a chain.
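//
// Illustrative end-of-epoch sequence (annotation, not part of this diff), assuming the
// queue-then-send pair above is driven from this module's epoch hooks (hook wiring is
// not shown in this file):
//
//	k.QueueValidatorUpdatesForEpochID(ctx, epochID, epochNumber) // fill the per-chain queues
//	k.SendQueuedValidatorUpdates(ctx, epochNumber)               // drain queues for chains with channels
//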
+func (k Keeper) SetPendingVscPackets(ctx sdk.Context, chainID string, data types.ValidatorSetChangePackets) { + store := ctx.KVStore(k.storeKey) + key := types.ChainIDToVscPacketsKey(chainID) + if len(data.List) == 0 { + store.Delete(key) + } else { + store.Set(key, k.cdc.MustMarshal(&data)) + } +} diff --git a/x/appchain/coordinator/keeper/validator_set_update_test.go b/x/appchain/coordinator/keeper/validator_set_update_test.go new file mode 100644 index 000000000..e5029a930 --- /dev/null +++ b/x/appchain/coordinator/keeper/validator_set_update_test.go @@ -0,0 +1,126 @@ +package keeper_test + +import ( + "testing" + + abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + testkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper" + testutiltx "github.com/ExocoreNetwork/exocore/testutil/tx" + keytypes "github.com/ExocoreNetwork/exocore/types/keys" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" +) + +func TestQueueValidatorUpdatesForEpochID(t *testing.T) { + keeper, ctx, mocks := testkeeper.NewCoordinatorKeeper(t) + + epochID := "test-epoch" + epochNumber := int64(1) + chainID := "test-chain" + + mocks.AVSKeeper.EXPECT().GetEpochEndChainIDs(ctx, epochID, epochNumber).Return([]string{chainID}) + mocks.OperatorKeeper.EXPECT().GetActiveOperatorsForChainID(gomock.Any(), chainID).Return([]sdk.AccAddress{}, []keytypes.WrappedConsKey{}) + mocks.OperatorKeeper.EXPECT().GetVotePowerForChainID(gomock.Any(), gomock.Any(), chainID).Return([]int64{}, nil) + + keeper.QueueValidatorUpdatesForEpochID(ctx, epochID, epochNumber) + + // Verify that the validator updates were queued + packets := keeper.GetPendingVscPackets(ctx, chainID) + require.Equal(t, 1, len(packets.List)) +} + +func TestQueueValidatorUpdatesForChainID(t *testing.T) { + keeper, ctx, mocks := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + operator := sdk.AccAddress("testoperator") + pubKey := ed25519.GenPrivKey().PubKey() + wrappedKey := keytypes.NewWrappedConsKeyFromSdkKey(pubKey) + + // set up max validators for chain + keeper.SetMaxValidatorsForChain(ctx, chainID, 100) + + mocks.OperatorKeeper.EXPECT().GetActiveOperatorsForChainID(ctx, chainID).Return([]sdk.AccAddress{operator}, []keytypes.WrappedConsKey{wrappedKey}).Times(1) + mocks.OperatorKeeper.EXPECT().GetVotePowerForChainID(ctx, []sdk.AccAddress{operator}, chainID).Return([]int64{100}, nil) + + err := keeper.QueueValidatorUpdatesForChainID(ctx, chainID) + require.NoError(t, err) + + // Verify that the validator updates were queued + packets := keeper.GetPendingVscPackets(ctx, chainID) + require.Equal(t, 1, len(packets.List)) + require.Equal(t, uint64(1), packets.List[0].ValsetUpdateID) + require.Equal(t, 1, len(packets.List[0].ValidatorUpdates)) +} + +func TestSendQueuedValidatorUpdates(t *testing.T) { + keeper, ctx, mocks := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + channelID := "channel-0" + epochNumber := int64(1) + + keeper.SetChannelForChain(ctx, chainID, channelID) + + key := testutiltx.GenerateConsensusKey() + packet := commontypes.ValidatorSetChangePacketData{ + ValsetUpdateID: 1, + ValidatorUpdates: []abci.ValidatorUpdate{ + {PubKey: *key.ToTmProtoKey(), Power: 100}, + }, + } + 
keeper.AppendPendingVscPacket(ctx, chainID, packet) + + mocks.ScopedKeeper.EXPECT().GetCapability(gomock.Any(), gomock.Any()).Return(nil, true) + mocks.ChannelKeeper.EXPECT().GetChannel(gomock.Any(), gomock.Any(), gomock.Any()).Return(channeltypes.Channel{}, true) + mocks.ChannelKeeper.EXPECT().SendPacket( + gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), + ).Return(uint64(1), nil) + + keeper.SendQueuedValidatorUpdates(ctx, epochNumber) + + // Verify that the pending packets were sent and cleared + packets := keeper.GetPendingVscPackets(ctx, chainID) + require.Empty(t, packets.List) +} + +func TestAppendGetSetPendingVscPacket(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + key := testutiltx.GenerateConsensusKey() + packet := commontypes.ValidatorSetChangePacketData{ + ValsetUpdateID: 1, + ValidatorUpdates: []abci.ValidatorUpdate{ + {PubKey: *key.ToTmProtoKey(), Power: 100}, + }, + } + + // Test appending + keeper.AppendPendingVscPacket(ctx, chainID, packet) + + // Test getting + packets := keeper.GetPendingVscPackets(ctx, chainID) + require.Equal(t, 1, len(packets.List)) + require.Equal(t, packet, packets.List[0]) + + // Test setting + newPackets := types.ValidatorSetChangePackets{ + List: []commontypes.ValidatorSetChangePacketData{packet, packet}, + } + keeper.SetPendingVscPackets(ctx, chainID, newPackets) + + gotPackets := keeper.GetPendingVscPackets(ctx, chainID) + require.Equal(t, 2, len(gotPackets.List)) + + // Test setting empty (should delete) + keeper.SetPendingVscPackets(ctx, chainID, types.ValidatorSetChangePackets{}) + gotPackets = keeper.GetPendingVscPackets(ctx, chainID) + require.Empty(t, gotPackets.List) +} diff --git a/x/appchain/coordinator/keeper/validators.go b/x/appchain/coordinator/keeper/validators.go new file mode 100644 index 000000000..7ad2d903f --- /dev/null +++ b/x/appchain/coordinator/keeper/validators.go @@ -0,0 +1,83 @@ +package keeper + +import ( + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// SetSubscriberValidatorForChain sets the subscriber validator for a chain. +// Storing this historical information allows us to minimize the number/size of +// validator set updates sent to the subscriber by skipping the keys for which +// there is no change in vote power. +func (k Keeper) SetSubscriberValidatorForChain( + ctx sdk.Context, chainID string, validator commontypes.SubscriberValidator, +) { + store := ctx.KVStore(k.storeKey) + key := types.SubscriberValidatorKey(chainID, validator.ConsAddress) + bz := k.cdc.MustMarshal(&validator) + store.Set(key, bz) +} + +// GetSubscriberValidatorForChain gets the subscriber validator for a chain. +func (k Keeper) GetSubscriberValidatorForChain( + ctx sdk.Context, chainID string, consAddress []byte, +) (validator commontypes.SubscriberValidator, found bool) { + store := ctx.KVStore(k.storeKey) + key := types.SubscriberValidatorKey(chainID, consAddress) + if !store.Has(key) { + return validator, false + } + bz := store.Get(key) + k.cdc.MustUnmarshal(bz, &validator) + return validator, true +} + +// GetAllSubscriberValidatorsForChain gets all subscriber validators for a chain, ordered +// by the consensus address bytes. 
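//
// Illustrative usage (annotation, not part of this diff), assuming a keeper `k`; the
// byte-ordered iteration below is what QueueValidatorUpdatesForChainID relies on when
// it diffs the previously stored set against the newly computed one:
//
//	k.SetSubscriberValidatorForChain(ctx, chainID, validator)
//	prev := k.GetAllSubscriberValidatorsForChain(ctx, chainID) // ordered by consensus address bytes
//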
+func (k Keeper) GetAllSubscriberValidatorsForChain( + ctx sdk.Context, chainID string, +) (validators []commontypes.SubscriberValidator) { + store := ctx.KVStore(k.storeKey) + partialKey := types.SubscriberValidatorKey(chainID, nil) + iterator := sdk.KVStorePrefixIterator(store, partialKey) + defer iterator.Close() + + for ; iterator.Valid(); iterator.Next() { + var validator commontypes.SubscriberValidator + k.cdc.MustUnmarshal(iterator.Value(), &validator) + validators = append(validators, validator) + } + + return validators +} + +// DeleteSubscriberValidatorForChain deletes the subscriber validator for a chain, given +// the consensus address. +func (k Keeper) DeleteSubscriberValidatorForChain( + ctx sdk.Context, chainID string, consAddress []byte, +) { + store := ctx.KVStore(k.storeKey) + key := types.SubscriberValidatorKey(chainID, consAddress) + store.Delete(key) +} + +// SetMaxValidatorsForChain sets the maximum number of validators for a chain. +func (k Keeper) SetMaxValidatorsForChain( + ctx sdk.Context, chainID string, maxValidators uint32, +) { + store := ctx.KVStore(k.storeKey) + key := types.MaxValidatorsKey(chainID) + store.Set(key, sdk.Uint64ToBigEndian(uint64(maxValidators))) +} + +// GetMaxValidatorsForChain gets the maximum number of validators for a chain. +func (k Keeper) GetMaxValidatorsForChain( + ctx sdk.Context, chainID string, +) uint32 { + store := ctx.KVStore(k.storeKey) + key := types.MaxValidatorsKey(chainID) + bz := store.Get(key) + // #nosec G115 // we stored it, we trust it + return uint32(sdk.BigEndianToUint64(bz)) +} diff --git a/x/appchain/coordinator/keeper/validators_test.go b/x/appchain/coordinator/keeper/validators_test.go new file mode 100644 index 000000000..4056eabad --- /dev/null +++ b/x/appchain/coordinator/keeper/validators_test.go @@ -0,0 +1,81 @@ +package keeper_test + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + + testkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" +) + +func TestSetGetSubscriberValidatorForChain(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + pubKey := ed25519.GenPrivKey().PubKey() + consAddr := sdk.ConsAddress(pubKey.Address()) + validator, err := commontypes.NewSubscriberValidator(consAddr, 100, pubKey) + require.NoError(t, err) + + // Test setting and getting + keeper.SetSubscriberValidatorForChain(ctx, chainID, validator) + gotValidator, found := keeper.GetSubscriberValidatorForChain(ctx, chainID, consAddr) + + require.True(t, found) + require.Equal(t, validator, gotValidator) + + // Test getting non-existent validator + _, found = keeper.GetSubscriberValidatorForChain(ctx, chainID, sdk.ConsAddress("non-existent")) + require.False(t, found) +} + +func TestGetAllSubscriberValidatorsForChain(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + validators := []commontypes.SubscriberValidator{} + + for i := 0; i < 3; i++ { + pubKey := ed25519.GenPrivKey().PubKey() + consAddr := sdk.ConsAddress(pubKey.Address()) + validator, err := commontypes.NewSubscriberValidator(consAddr, 100+int64(i), pubKey) + require.NoError(t, err) + validators = append(validators, validator) + keeper.SetSubscriberValidatorForChain(ctx, chainID, validator) + } + + gotValidators := keeper.GetAllSubscriberValidatorsForChain(ctx, chainID) + 
require.Equal(t, len(validators), len(gotValidators)) + require.ElementsMatch(t, validators, gotValidators) +} + +func TestDeleteSubscriberValidatorForChain(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + pubKey := ed25519.GenPrivKey().PubKey() + consAddr := sdk.ConsAddress(pubKey.Address()) + validator, err := commontypes.NewSubscriberValidator(consAddr, 100, pubKey) + require.NoError(t, err) + + keeper.SetSubscriberValidatorForChain(ctx, chainID, validator) + keeper.DeleteSubscriberValidatorForChain(ctx, chainID, consAddr) + + _, found := keeper.GetSubscriberValidatorForChain(ctx, chainID, consAddr) + require.False(t, found) +} + +func TestSetGetMaxValidatorsForChain(t *testing.T) { + keeper, ctx, _ := testkeeper.NewCoordinatorKeeper(t) + + chainID := "test-chain" + maxValidators := uint32(100) + + keeper.SetMaxValidatorsForChain(ctx, chainID, maxValidators) + gotMaxValidators := keeper.GetMaxValidatorsForChain(ctx, chainID) + + require.Equal(t, maxValidators, gotMaxValidators) +} diff --git a/x/appchain/coordinator/module.go b/x/appchain/coordinator/module.go index efaed5a7e..c823ae3f1 100644 --- a/x/appchain/coordinator/module.go +++ b/x/appchain/coordinator/module.go @@ -1,4 +1,4 @@ -package dogfood +package coordinator import ( "context" diff --git a/x/appchain/coordinator/module_ibc.go b/x/appchain/coordinator/module_ibc.go new file mode 100644 index 000000000..caa8932a7 --- /dev/null +++ b/x/appchain/coordinator/module_ibc.go @@ -0,0 +1,316 @@ +package coordinator + +import ( + "fmt" + + errorsmod "cosmossdk.io/errors" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/keeper" + "github.com/ExocoreNetwork/exocore/x/appchain/coordinator/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types" + host "github.com/cosmos/ibc-go/v7/modules/core/24-host" + ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported" +) + +// IBCModule is the IBC module for the coordinator module.
+type IBCModule struct { + keeper keeper.Keeper +} + +// interface guard +var _ porttypes.IBCModule = IBCModule{} + +// NewIBCModule creates a new IBCModule instance +func NewIBCModule(k keeper.Keeper) IBCModule { + return IBCModule{ + keeper: k, + } +} + +// OnChanOpenInit implements the IBCModule interface +func (im IBCModule) OnChanOpenInit( + ctx sdk.Context, + _ channeltypes.Order, + _ []string, + _ string, + _ string, + _ *capabilitytypes.Capability, + _ channeltypes.Counterparty, + version string, +) (string, error) { + im.keeper.Logger(ctx).Debug( + "OnChanOpenInit", + ) + return version, errorsmod.Wrap( + commontypes.ErrInvalidChannelFlow, + "channel handshake must be initiated by subscriber chain", + ) +} + +// OnChanOpenTry implements the IBCModule interface +func (im IBCModule) OnChanOpenTry( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID string, + channelID string, + chanCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + counterpartyVersion string, +) (string, error) { + im.keeper.Logger(ctx).Debug( + "OnChanOpenTry", + ) + // channel ordering + if order != channeltypes.ORDERED { + return "", errorsmod.Wrapf( + channeltypes.ErrInvalidChannelOrdering, + "expected %s channel, got %s", channeltypes.ORDERED, order, + ) + } + + // the channel's portId should match the module's + boundPort := im.keeper.GetPort(ctx) + if boundPort != portID { + return "", errorsmod.Wrapf( + porttypes.ErrInvalidPort, + "invalid port: %s, expected %s", portID, boundPort, + ) + } + + if counterpartyVersion != commontypes.Version { + return "", errorsmod.Wrapf( + commontypes.ErrInvalidVersion, + "invalid counterparty version: got: %s, expected %s", + counterpartyVersion, + commontypes.Version, + ) + } + + if counterparty.PortId != commontypes.SubscriberPortID { + return "", errorsmod.Wrapf( + porttypes.ErrInvalidPort, + "invalid counterparty port Id: got %s, expected %s", + counterparty.PortId, + commontypes.SubscriberPortID, + ) + } + + // Claim channel capability + if err := im.keeper.ClaimCapability( + ctx, chanCap, host.ChannelCapabilityPath(portID, channelID), + ); err != nil { + return "", err + } + + if err := im.keeper.VerifySubscriberChain( + ctx, channelID, connectionHops, + ); err != nil { + return "", err + } + + md := commontypes.HandshakeMetadata{ + CoordinatorFeePoolAddr: im.keeper.GetSubscriberRewardsPoolAddressStr(ctx), + Version: commontypes.Version, + } + // we can use `MustMarshal` for data that we create + mdBz := commontypes.ModuleCdc.MustMarshal(&md) + return string(mdBz), nil +} + +// OnChanOpenAck implements the IBCModule interface +func (im IBCModule) OnChanOpenAck( + ctx sdk.Context, + _ string, + _ string, + _ string, + _ string, +) error { + im.keeper.Logger(ctx).Debug( + "OnChanOpenAck", + ) + return errorsmod.Wrap( + commontypes.ErrInvalidChannelFlow, + "channel handshake must be initiated by subscriber chain", + ) +} + +// OnChanOpenConfirm implements the IBCModule interface +func (im IBCModule) OnChanOpenConfirm( + ctx sdk.Context, _ string, dstChannelID string, +) error { + im.keeper.Logger(ctx).Debug( + "OnChanOpenConfirm", + ) + err := im.keeper.SetSubscriberChain(ctx, dstChannelID) + if err != nil { + return err + } + return nil +} + +// OnChanCloseInit implements the IBCModule interface +func (im IBCModule) OnChanCloseInit( + ctx sdk.Context, _ string, _ string, +) error { + im.keeper.Logger(ctx).Debug( + "OnChanCloseInit", + ) + // Disallow user-initiated channel closing for channels + return 
errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "user cannot close channel") +} + +// OnChanCloseConfirm implements the IBCModule interface +func (im IBCModule) OnChanCloseConfirm( + ctx sdk.Context, _ string, _ string, +) error { + im.keeper.Logger(ctx).Debug( + "OnChanCloseConfirm", + ) + return nil +} + +// OnRecvPacket implements the IBCModule interface +func (im IBCModule) OnRecvPacket( + ctx sdk.Context, packet channeltypes.Packet, _ sdk.AccAddress, +) ibcexported.Acknowledgement { + im.keeper.Logger(ctx).Debug( + "OnRecvPacket", + ) + + var ( + ack ibcexported.Acknowledgement + data commontypes.SubscriberPacketData + err error + res []byte + ) + + // (1) Since this is a packet originating from the subscriber, we cannot use MustUnmarshal, + // because such packets are not guaranteed to be correctly formed. + // (2) When the subscriber chain marshals the data, it should use MarshalJSON. + if unmarshalErr := commontypes.ModuleCdc.UnmarshalJSON( + packet.GetData(), &data, + ); unmarshalErr != nil { + im.keeper.Logger(ctx).Error( + "cannot unmarshal subscriber packet data", + "error", unmarshalErr, + ) + err = sdkerrors.ErrInvalidType.Wrapf( + "cannot unmarshal subscriber packet data: %s", unmarshalErr, + ) + } else { + switch data.Type { + case commontypes.SlashPacket: + im.keeper.Logger(ctx).Debug( + "OnRecvSlashPacket", + "packet data", data, + ) + res, err = im.keeper.OnRecvSlashPacket(ctx, packet, *data.GetSlashPacketData()) + case commontypes.VscMaturedPacket: + im.keeper.Logger(ctx).Debug( + "OnRecvVscMaturedPacket", + "packet data", data, + ) + // no need to send an ack for this packet type + err = im.keeper.OnRecvVscMaturedPacket(ctx, packet, *data.GetVscMaturedPacketData()) + default: + err = sdkerrors.ErrInvalidType.Wrapf("unknown packet type: %s", data.Type) + } + } + switch { + case err != nil: + ack = commontypes.NewErrorAcknowledgementWithLog(ctx, err) + case res != nil: + ack = commontypes.NewResultAcknowledgementWithLog(ctx, res) + default: + ack = commontypes.NewResultAcknowledgementWithLog(ctx, nil) + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + commontypes.EventTypePacket, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(commontypes.AttributeKeyAckSuccess, fmt.Sprintf("%t", ack.Success())), + ), + ) + + return ack +} + +// OnAcknowledgementPacket implements the IBCModule interface +func (im IBCModule) OnAcknowledgementPacket( + ctx sdk.Context, + packet channeltypes.Packet, + acknowledgement []byte, + _ sdk.AccAddress, +) error { + im.keeper.Logger(ctx).Debug( + "OnAcknowledgementPacket", + ) + // same as before, this packet is sent by the subscriber, so we cannot use MustUnmarshal + var ack channeltypes.Acknowledgement + if err := commontypes.ModuleCdc.UnmarshalJSON(acknowledgement, &ack); err != nil { + return errorsmod.Wrapf( + sdkerrors.ErrUnknownRequest, + "cannot unmarshal packet acknowledgement: %s", err, + ) + } + + if err := im.keeper.OnAcknowledgementPacket(ctx, packet, ack); err != nil { + return err + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + commontypes.EventTypePacket, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(commontypes.AttributeKeyAck, ack.String()), + ), + ) + + switch resp := ack.Response.(type) { + case *channeltypes.Acknowledgement_Result: + ctx.EventManager().EmitEvent( + sdk.NewEvent( + commontypes.EventTypePacket, + sdk.NewAttribute(commontypes.AttributeKeyAckSuccess, string(resp.Result)), + ), + ) + case *channeltypes.Acknowledgement_Error: +
ctx.EventManager().EmitEvent( + sdk.NewEvent( + commontypes.EventTypePacket, + sdk.NewAttribute(commontypes.AttributeKeyAckError, resp.Error), + ), + ) + } + + return nil +} + +// OnTimeoutPacket implements the IBCModule interface +func (im IBCModule) OnTimeoutPacket( + ctx sdk.Context, + packet channeltypes.Packet, + _ sdk.AccAddress, +) error { + im.keeper.Logger(ctx).Debug( + "OnTimeoutPacket", + ) + if err := im.keeper.OnTimeoutPacket(ctx, packet); err != nil { + return err + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + commontypes.EventTypeTimeout, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + ), + ) + + return nil +} diff --git a/x/appchain/coordinator/types/coordinator.pb.go b/x/appchain/coordinator/types/coordinator.pb.go index ba10cad72..c986f2f56 100644 --- a/x/appchain/coordinator/types/coordinator.pb.go +++ b/x/appchain/coordinator/types/coordinator.pb.go @@ -5,6 +5,7 @@ package types import ( fmt "fmt" + types "github.com/ExocoreNetwork/exocore/x/appchain/common/types" _ "github.com/cosmos/gogoproto/gogoproto" proto "github.com/cosmos/gogoproto/proto" io "io" @@ -116,9 +117,150 @@ func (m *ChainIDs) GetList() []string { return nil } +// ConsensusAddresses is a list of consensus addresses. +type ConsensusAddresses struct { + // list is the list of consensus addresses. + List [][]byte `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` +} + +func (m *ConsensusAddresses) Reset() { *m = ConsensusAddresses{} } +func (m *ConsensusAddresses) String() string { return proto.CompactTextString(m) } +func (*ConsensusAddresses) ProtoMessage() {} +func (*ConsensusAddresses) Descriptor() ([]byte, []int) { + return fileDescriptor_fb7bb04617dc0e61, []int{2} +} +func (m *ConsensusAddresses) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConsensusAddresses) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ConsensusAddresses.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ConsensusAddresses) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsensusAddresses.Merge(m, src) +} +func (m *ConsensusAddresses) XXX_Size() int { + return m.Size() +} +func (m *ConsensusAddresses) XXX_DiscardUnknown() { + xxx_messageInfo_ConsensusAddresses.DiscardUnknown(m) +} + +var xxx_messageInfo_ConsensusAddresses proto.InternalMessageInfo + +func (m *ConsensusAddresses) GetList() [][]byte { + if m != nil { + return m.List + } + return nil +} + +// ValidatorSetChangePackets is a helper structure to store a list of packets +type ValidatorSetChangePackets struct { + // list is the list of packets to be sent to the subscriber chain. 
+ List []types.ValidatorSetChangePacketData `protobuf:"bytes,1,rep,name=list,proto3" json:"list"` +} + +func (m *ValidatorSetChangePackets) Reset() { *m = ValidatorSetChangePackets{} } +func (m *ValidatorSetChangePackets) String() string { return proto.CompactTextString(m) } +func (*ValidatorSetChangePackets) ProtoMessage() {} +func (*ValidatorSetChangePackets) Descriptor() ([]byte, []int) { + return fileDescriptor_fb7bb04617dc0e61, []int{3} +} +func (m *ValidatorSetChangePackets) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatorSetChangePackets) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValidatorSetChangePackets.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValidatorSetChangePackets) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatorSetChangePackets.Merge(m, src) +} +func (m *ValidatorSetChangePackets) XXX_Size() int { + return m.Size() +} +func (m *ValidatorSetChangePackets) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatorSetChangePackets.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatorSetChangePackets proto.InternalMessageInfo + +func (m *ValidatorSetChangePackets) GetList() []types.ValidatorSetChangePacketData { + if m != nil { + return m.List + } + return nil +} + +// UndelegationRecordKeys is a collection of undelegation record keys. +type UndelegationRecordKeys struct { + // list is the list of undelegation record keys. + List [][]byte `protobuf:"bytes,1,rep,name=list,proto3" json:"list,omitempty"` +} + +func (m *UndelegationRecordKeys) Reset() { *m = UndelegationRecordKeys{} } +func (m *UndelegationRecordKeys) String() string { return proto.CompactTextString(m) } +func (*UndelegationRecordKeys) ProtoMessage() {} +func (*UndelegationRecordKeys) Descriptor() ([]byte, []int) { + return fileDescriptor_fb7bb04617dc0e61, []int{4} +} +func (m *UndelegationRecordKeys) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UndelegationRecordKeys) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UndelegationRecordKeys.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UndelegationRecordKeys) XXX_Merge(src proto.Message) { + xxx_messageInfo_UndelegationRecordKeys.Merge(m, src) +} +func (m *UndelegationRecordKeys) XXX_Size() int { + return m.Size() +} +func (m *UndelegationRecordKeys) XXX_DiscardUnknown() { + xxx_messageInfo_UndelegationRecordKeys.DiscardUnknown(m) +} + +var xxx_messageInfo_UndelegationRecordKeys proto.InternalMessageInfo + +func (m *UndelegationRecordKeys) GetList() [][]byte { + if m != nil { + return m.List + } + return nil +} + func init() { proto.RegisterType((*PendingSubscriberChainRequests)(nil), "exocore.appchain.coordinator.v1.PendingSubscriberChainRequests") proto.RegisterType((*ChainIDs)(nil), "exocore.appchain.coordinator.v1.ChainIDs") + proto.RegisterType((*ConsensusAddresses)(nil), "exocore.appchain.coordinator.v1.ConsensusAddresses") + proto.RegisterType((*ValidatorSetChangePackets)(nil), "exocore.appchain.coordinator.v1.ValidatorSetChangePackets") + proto.RegisterType((*UndelegationRecordKeys)(nil), "exocore.appchain.coordinator.v1.UndelegationRecordKeys") } func init() { @@ -126,23 +268,29 @@ func init() { } var fileDescriptor_fb7bb04617dc0e61 
= []byte{ - // 254 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0x4c, 0xad, 0xc8, 0x4f, - 0xce, 0x2f, 0x4a, 0xd5, 0x4f, 0x2c, 0x28, 0x48, 0xce, 0x48, 0xcc, 0xcc, 0xd3, 0x4f, 0xce, 0xcf, - 0x2f, 0x4a, 0xc9, 0xcc, 0x4b, 0x2c, 0xc9, 0x2f, 0xd2, 0x2f, 0x33, 0x44, 0xe6, 0xea, 0x15, 0x14, - 0xe5, 0x97, 0xe4, 0x0b, 0xc9, 0x43, 0xb5, 0xe8, 0xc1, 0xb4, 0xe8, 0x21, 0xab, 0x29, 0x33, 0x94, - 0xd2, 0x20, 0x64, 0x66, 0x49, 0x05, 0xc4, 0x28, 0x29, 0x91, 0xf4, 0xfc, 0xf4, 0x7c, 0x30, 0x53, - 0x1f, 0xc4, 0x82, 0x88, 0x2a, 0x55, 0x73, 0xc9, 0x05, 0xa4, 0xe6, 0xa5, 0x64, 0xe6, 0xa5, 0x07, - 0x97, 0x26, 0x15, 0x27, 0x17, 0x65, 0x26, 0xa5, 0x16, 0x39, 0x83, 0xcc, 0x09, 0x4a, 0x2d, 0x2c, - 0x4d, 0x2d, 0x2e, 0x29, 0x16, 0x8a, 0xe4, 0x62, 0xc9, 0xc9, 0x2c, 0x2e, 0x91, 0x60, 0x54, 0x60, - 0xd6, 0xe0, 0x36, 0xb2, 0xd7, 0x23, 0xe0, 0x22, 0xbd, 0xa0, 0xd4, 0xf4, 0xcc, 0xe2, 0x92, 0xd4, - 0x22, 0xec, 0xe6, 0x39, 0xb1, 0x9c, 0xb8, 0x27, 0xcf, 0x10, 0x04, 0x36, 0x52, 0x49, 0x8e, 0x8b, - 0x03, 0x2c, 0xe7, 0xe9, 0x52, 0x2c, 0x24, 0x84, 0x64, 0x0d, 0x27, 0x44, 0xde, 0x29, 0xe2, 0xc4, - 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, - 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0xec, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, - 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0x5d, 0x21, 0x0e, 0xf2, 0x4b, 0x2d, 0x29, 0xcf, 0x2f, 0xca, 0xd6, - 0x87, 0x05, 0x48, 0x05, 0xf6, 0x20, 0x29, 0xa9, 0x2c, 0x48, 0x2d, 0x4e, 0x62, 0x03, 0xfb, 0xde, - 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x4d, 0xb2, 0xe2, 0x02, 0x93, 0x01, 0x00, 0x00, + // 352 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x91, 0xcf, 0x4a, 0xc3, 0x40, + 0x10, 0x87, 0x13, 0x2c, 0xa2, 0xd1, 0x53, 0x10, 0xd1, 0x1e, 0xb6, 0x52, 0x10, 0x7a, 0x90, 0x84, + 0xea, 0xc5, 0x93, 0x62, 0x5b, 0x0f, 0x22, 0x48, 0x49, 0x51, 0xd4, 0xdb, 0x26, 0x19, 0xb6, 0x4b, + 0xdb, 0x9d, 0xb8, 0xbb, 0xe9, 0x1f, 0x7c, 0x09, 0x1f, 0xab, 0xc7, 0x1e, 0x3d, 0x89, 0xb4, 0x2f, + 0x22, 0x9b, 0xa6, 0xd0, 0x42, 0xa4, 0xb7, 0x4d, 0xf6, 0x37, 0xdf, 0x7c, 0xb3, 0xe3, 0xd4, 0x61, + 0x8c, 0x11, 0x4a, 0xf0, 0x69, 0x92, 0x44, 0x5d, 0xca, 0x85, 0x1f, 0x21, 0xca, 0x98, 0x0b, 0xaa, + 0x51, 0xfa, 0xc3, 0xfa, 0xfa, 0xa7, 0x97, 0x48, 0xd4, 0xe8, 0x56, 0xf2, 0x12, 0x6f, 0x55, 0xe2, + 0xad, 0x67, 0x86, 0xf5, 0xf2, 0x79, 0x01, 0x73, 0x30, 0x40, 0x61, 0x70, 0x23, 0x2e, 0x61, 0xc9, + 0x29, 0xd7, 0xb6, 0xb5, 0xd6, 0xe3, 0x3c, 0x79, 0xc4, 0x90, 0x61, 0x76, 0xf4, 0xcd, 0x69, 0xf9, + 0xb7, 0xfa, 0xe9, 0x90, 0x36, 0x88, 0x98, 0x0b, 0xd6, 0x49, 0x43, 0x15, 0x49, 0x1e, 0x82, 0x6c, + 0x1a, 0x4e, 0x00, 0x1f, 0x29, 0x28, 0xad, 0xdc, 0x37, 0xa7, 0xd4, 0xe7, 0x4a, 0x9f, 0xd8, 0x67, + 0x3b, 0xb5, 0x83, 0xcb, 0x5b, 0x6f, 0x8b, 0xb8, 0x17, 0x00, 0xe3, 0x4a, 0x83, 0x2c, 0xe6, 0x35, + 0x4a, 0xd3, 0x9f, 0x8a, 0x15, 0x64, 0xc8, 0x2a, 0x71, 0xf6, 0xb2, 0xbb, 0x87, 0x96, 0x72, 0xdd, + 0xb5, 0x36, 0xfb, 0xf9, 0x7d, 0xcd, 0x71, 0x9b, 0x28, 0x14, 0x08, 0x95, 0xaa, 0xbb, 0x38, 0x96, + 0xa0, 0x14, 0x6c, 0x26, 0x0f, 0xf3, 0x24, 0x3a, 0xa7, 0x2f, 0xb4, 0xcf, 0x63, 0x23, 0xd1, 0x01, + 0xdd, 0xec, 0x52, 0xc1, 0xa0, 0x4d, 0xa3, 0x1e, 0x68, 0xe5, 0x06, 0x1b, 0x13, 0x5c, 0x17, 0x4d, + 0x60, 0x5e, 0xd6, 0xc8, 0xff, 0x07, 0x69, 0x51, 0x4d, 0x37, 0xd4, 0x2f, 0x9c, 0xe3, 0x67, 0x11, + 0x43, 0x1f, 0x18, 0xd5, 0x1c, 0x45, 0x00, 0x11, 0xca, 0xf8, 0x11, 0x26, 0x85, 0x7a, 0x8d, 0xd7, + 0xe9, 0x9c, 0xd8, 0xb3, 0x39, 0xb1, 0x7f, 0xe7, 0xc4, 0xfe, 0x5a, 0x10, 0x6b, 0xb6, 0x20, 0xd6, + 0xf7, 0x82, 0x58, 0xef, 
0x37, 0x8c, 0xeb, 0x6e, 0x1a, 0x1a, 0x05, 0xff, 0x7e, 0xe9, 0xf5, 0x04, + 0x7a, 0x84, 0xb2, 0xe7, 0xaf, 0x36, 0x3b, 0x2e, 0xde, 0xad, 0x9e, 0x24, 0xa0, 0xc2, 0xdd, 0x6c, + 0x8d, 0x57, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xfc, 0xdf, 0xf1, 0x83, 0x02, 0x00, 0x00, } func (m *PendingSubscriberChainRequests) Marshal() (dAtA []byte, err error) { @@ -214,6 +362,107 @@ func (m *ChainIDs) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ConsensusAddresses) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConsensusAddresses) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConsensusAddresses) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.List) > 0 { + for iNdEx := len(m.List) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.List[iNdEx]) + copy(dAtA[i:], m.List[iNdEx]) + i = encodeVarintCoordinator(dAtA, i, uint64(len(m.List[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ValidatorSetChangePackets) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatorSetChangePackets) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatorSetChangePackets) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.List) > 0 { + for iNdEx := len(m.List) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.List[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCoordinator(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *UndelegationRecordKeys) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UndelegationRecordKeys) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UndelegationRecordKeys) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.List) > 0 { + for iNdEx := len(m.List) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.List[iNdEx]) + copy(dAtA[i:], m.List[iNdEx]) + i = encodeVarintCoordinator(dAtA, i, uint64(len(m.List[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func encodeVarintCoordinator(dAtA []byte, offset int, v uint64) int { offset -= sovCoordinator(v) base := offset @@ -255,6 +504,51 @@ func (m *ChainIDs) Size() (n int) { return n } +func (m *ConsensusAddresses) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.List) > 0 { + for _, b := range m.List { + l = len(b) + n += 1 + l + sovCoordinator(uint64(l)) + } + } + return n +} + +func (m *ValidatorSetChangePackets) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.List) > 0 { + for _, e := range m.List { + l = e.Size() + n += 1 + l + sovCoordinator(uint64(l)) + } + } + return n +} + +func (m *UndelegationRecordKeys) Size() (n int) { + if m == nil { + return 0 + } + var l int + 
_ = l + if len(m.List) > 0 { + for _, b := range m.List { + l = len(b) + n += 1 + l + sovCoordinator(uint64(l)) + } + } + return n +} + func sovCoordinator(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -427,6 +721,254 @@ func (m *ChainIDs) Unmarshal(dAtA []byte) error { } return nil } +func (m *ConsensusAddresses) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoordinator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsensusAddresses: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsensusAddresses: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field List", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoordinator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCoordinator + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCoordinator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.List = append(m.List, make([]byte, postIndex-iNdEx)) + copy(m.List[len(m.List)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCoordinator(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCoordinator + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatorSetChangePackets) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoordinator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatorSetChangePackets: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatorSetChangePackets: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field List", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoordinator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCoordinator + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCoordinator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.List = append(m.List, types.ValidatorSetChangePacketData{}) + if err := m.List[len(m.List)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + 
iNdEx = preIndex + skippy, err := skipCoordinator(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCoordinator + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UndelegationRecordKeys) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoordinator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UndelegationRecordKeys: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UndelegationRecordKeys: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field List", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCoordinator + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthCoordinator + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthCoordinator + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.List = append(m.List, make([]byte, postIndex-iNdEx)) + copy(m.List[len(m.List)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCoordinator(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCoordinator + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipCoordinator(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/x/appchain/coordinator/types/errors.go b/x/appchain/coordinator/types/errors.go index 318378878..40b8b7a44 100644 --- a/x/appchain/coordinator/types/errors.go +++ b/x/appchain/coordinator/types/errors.go @@ -9,15 +9,37 @@ const ( errCodeNilRequest errCodeDuplicateSubChain errCodeNoOperators + errCodeInvalidSubscriberClient + errCodeUnknownSubscriberChannelID ) var ( - // ErrInvalidRegistrationParams is the error returned when the subscriber chain registration params are invalid - ErrInvalidRegistrationParams = errorsmod.Register(ModuleName, errCodeInvalidParams, "invalid registration params") + // ErrInvalidRegistrationParams is the error returned when the subscriber chain + // registration params are invalid + ErrInvalidRegistrationParams = errorsmod.Register( + ModuleName, errCodeInvalidParams, "invalid registration params", + ) // ErrNilRequest is the error returned when the request is nil - ErrNilRequest = errorsmod.Register(ModuleName, errCodeNilRequest, "nil request") - // ErrDuplicateSubChain is the error returned when a client for the chain already exists - ErrDuplicateSubChain = errorsmod.Register(ModuleName, errCodeDuplicateSubChain, "subscriber chain already exists") + ErrNilRequest = errorsmod.Register( + ModuleName, errCodeNilRequest, "nil request", + ) + // ErrDuplicateSubChain is the error returned when + // a client 
for the chain already exists + ErrDuplicateSubChain = errorsmod.Register( + ModuleName, errCodeDuplicateSubChain, "subscriber chain already exists", + ) // ErrNoOperators is the error returned when no qualified operators are available - ErrNoOperators = errorsmod.Register(ModuleName, errCodeNoOperators, "no operators available") + ErrNoOperators = errorsmod.Register( + ModuleName, errCodeNoOperators, "no operators available", + ) + // ErrInvalidSubscriberClient is the error returned when the + // client for the subscriber chain is invalid + ErrInvalidSubscriberClient = errorsmod.Register( + ModuleName, errCodeInvalidSubscriberClient, "invalid subscriber client", + ) + // ErrUnknownSubscriberChannelID is the error returned when the channel ID + // corresponding to a message from the subscriber chain is unknown + ErrUnknownSubscriberChannelID = errorsmod.Register( + ModuleName, errCodeUnknownSubscriberChannelID, "unknown subscriber channel ID", + ) ) diff --git a/x/appchain/coordinator/types/expected_keepers.go b/x/appchain/coordinator/types/expected_keepers.go index a011ac96e..9c2c60097 100644 --- a/x/appchain/coordinator/types/expected_keepers.go +++ b/x/appchain/coordinator/types/expected_keepers.go @@ -7,14 +7,18 @@ import ( avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" epochstypes "github.com/ExocoreNetwork/exocore/x/epochs/types" sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" "github.com/ethereum/go-ethereum/common" ) // AVSKeeper represents the expected keeper interface for the AVS module. type AVSKeeper interface { - RegisterAVSWithChainID(sdk.Context, *avstypes.AVSRegisterOrDeregisterParams) (common.Address, error) - IsAVSByChainID(sdk.Context, string) (bool, common.Address) - DeleteAVSInfo(sdk.Context, common.Address) error + RegisterAVSWithChainID( + sdk.Context, *avstypes.AVSRegisterOrDeregisterParams, + ) (common.Address, error) + IsAVSByChainID(sdk.Context, string) (bool, string) + DeleteAVSInfo(sdk.Context, string) error + GetEpochEndChainIDs(sdk.Context, string, int64) []string } // EpochsKeeper represents the expected keeper interface for the epochs module. @@ -29,6 +33,30 @@ type StakingKeeper interface { // OperatorKeeper represents the expected keeper interface for the operator module. type OperatorKeeper interface { + GetOperatorConsKeyForChainID(sdk.Context, sdk.AccAddress, string) (bool, keytypes.WrappedConsKey, error) + IsOperatorRemovingKeyFromChainID(sdk.Context, sdk.AccAddress, string) bool GetActiveOperatorsForChainID(sdk.Context, string) ([]sdk.AccAddress, []keytypes.WrappedConsKey) GetVotePowerForChainID(sdk.Context, []sdk.AccAddress, string) ([]int64, error) + GetOperatorAddressForChainIDAndConsAddr( + sdk.Context, string, sdk.ConsAddress, + ) (bool, sdk.AccAddress) + DeleteOperatorAddressForChainIDAndConsAddr( + ctx sdk.Context, chainID string, consAddr sdk.ConsAddress, + ) + // compared to slashing forwarded by Tendermint, this function doesn't have the vote power parameter. + // instead it contains the avs address for which the slashing is being executed. the interface is + // subject to change during implementation. It should check that the validator isn't permanently + // kicked, and it should jail the validator for the provided duration. 
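// An illustrative call (annotation, not part of this diff) from the coordinator's slash
// handling might look as follows; every concrete value below is a placeholder:
//
//	err := operatorKeeper.ApplySlashForHeight(
//		ctx, operatorAccAddr, avsAddress,
//		infractionHeight, sdk.NewDecWithPrec(5, 2), // slash 5%
//		stakingtypes.Infraction_INFRACTION_DOWNTIME, 600*time.Second,
//	)
//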
+ ApplySlashForHeight( + ctx sdk.Context, operatorAccAddress sdk.AccAddress, avsAddress string, + height uint64, fraction sdk.Dec, infraction stakingtypes.Infraction, + jailDuration time.Duration, + ) error + GetChainIDsForOperator(sdk.Context, string) ([]string, error) +} + +// DelegationKeeper represents the expected keeper interface for the delegation module. +type DelegationKeeper interface { + IncrementUndelegationHoldCount(sdk.Context, []byte) error + DecrementUndelegationHoldCount(sdk.Context, []byte) error } diff --git a/x/appchain/coordinator/types/expected_keepers_mocks.go b/x/appchain/coordinator/types/expected_keepers_mocks.go new file mode 100644 index 000000000..fc222c2c7 --- /dev/null +++ b/x/appchain/coordinator/types/expected_keepers_mocks.go @@ -0,0 +1,369 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: x/appchain/coordinator/types/expected_keepers.go +// +// Generated by this command: +// +// mockgen -source=x/appchain/coordinator/types/expected_keepers.go -destination=x/appchain/coordinator/types/expected_keepers_mocks.go -package=types +// + +// Package types is a generated GoMock package. +package types + +import ( + reflect "reflect" + time "time" + + keys "github.com/ExocoreNetwork/exocore/types/keys" + types "github.com/ExocoreNetwork/exocore/x/avs/types" + types0 "github.com/ExocoreNetwork/exocore/x/epochs/types" + types1 "github.com/cosmos/cosmos-sdk/types" + types2 "github.com/cosmos/cosmos-sdk/x/staking/types" + common "github.com/ethereum/go-ethereum/common" + gomock "go.uber.org/mock/gomock" +) + +// MockAVSKeeper is a mock of AVSKeeper interface. +type MockAVSKeeper struct { + ctrl *gomock.Controller + recorder *MockAVSKeeperMockRecorder +} + +// MockAVSKeeperMockRecorder is the mock recorder for MockAVSKeeper. +type MockAVSKeeperMockRecorder struct { + mock *MockAVSKeeper +} + +// NewMockAVSKeeper creates a new mock instance. +func NewMockAVSKeeper(ctrl *gomock.Controller) *MockAVSKeeper { + mock := &MockAVSKeeper{ctrl: ctrl} + mock.recorder = &MockAVSKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockAVSKeeper) EXPECT() *MockAVSKeeperMockRecorder { + return m.recorder +} + +// DeleteAVSInfo mocks base method. +func (m *MockAVSKeeper) DeleteAVSInfo(arg0 types1.Context, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteAVSInfo", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteAVSInfo indicates an expected call of DeleteAVSInfo. +func (mr *MockAVSKeeperMockRecorder) DeleteAVSInfo(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteAVSInfo", reflect.TypeOf((*MockAVSKeeper)(nil).DeleteAVSInfo), arg0, arg1) +} + +// GetEpochEndChainIDs mocks base method. +func (m *MockAVSKeeper) GetEpochEndChainIDs(arg0 types1.Context, arg1 string, arg2 int64) []string { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEpochEndChainIDs", arg0, arg1, arg2) + ret0, _ := ret[0].([]string) + return ret0 +} + +// GetEpochEndChainIDs indicates an expected call of GetEpochEndChainIDs. +func (mr *MockAVSKeeperMockRecorder) GetEpochEndChainIDs(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEpochEndChainIDs", reflect.TypeOf((*MockAVSKeeper)(nil).GetEpochEndChainIDs), arg0, arg1, arg2) +} + +// IsAVSByChainID mocks base method. 
+func (m *MockAVSKeeper) IsAVSByChainID(arg0 types1.Context, arg1 string) (bool, string) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsAVSByChainID", arg0, arg1) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(string) + return ret0, ret1 +} + +// IsAVSByChainID indicates an expected call of IsAVSByChainID. +func (mr *MockAVSKeeperMockRecorder) IsAVSByChainID(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsAVSByChainID", reflect.TypeOf((*MockAVSKeeper)(nil).IsAVSByChainID), arg0, arg1) +} + +// RegisterAVSWithChainID mocks base method. +func (m *MockAVSKeeper) RegisterAVSWithChainID(arg0 types1.Context, arg1 *types.AVSRegisterOrDeregisterParams) (common.Address, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "RegisterAVSWithChainID", arg0, arg1) + ret0, _ := ret[0].(common.Address) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// RegisterAVSWithChainID indicates an expected call of RegisterAVSWithChainID. +func (mr *MockAVSKeeperMockRecorder) RegisterAVSWithChainID(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RegisterAVSWithChainID", reflect.TypeOf((*MockAVSKeeper)(nil).RegisterAVSWithChainID), arg0, arg1) +} + +// MockEpochsKeeper is a mock of EpochsKeeper interface. +type MockEpochsKeeper struct { + ctrl *gomock.Controller + recorder *MockEpochsKeeperMockRecorder +} + +// MockEpochsKeeperMockRecorder is the mock recorder for MockEpochsKeeper. +type MockEpochsKeeperMockRecorder struct { + mock *MockEpochsKeeper +} + +// NewMockEpochsKeeper creates a new mock instance. +func NewMockEpochsKeeper(ctrl *gomock.Controller) *MockEpochsKeeper { + mock := &MockEpochsKeeper{ctrl: ctrl} + mock.recorder = &MockEpochsKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockEpochsKeeper) EXPECT() *MockEpochsKeeperMockRecorder { + return m.recorder +} + +// GetEpochInfo mocks base method. +func (m *MockEpochsKeeper) GetEpochInfo(arg0 types1.Context, arg1 string) (types0.EpochInfo, bool) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetEpochInfo", arg0, arg1) + ret0, _ := ret[0].(types0.EpochInfo) + ret1, _ := ret[1].(bool) + return ret0, ret1 +} + +// GetEpochInfo indicates an expected call of GetEpochInfo. +func (mr *MockEpochsKeeperMockRecorder) GetEpochInfo(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetEpochInfo", reflect.TypeOf((*MockEpochsKeeper)(nil).GetEpochInfo), arg0, arg1) +} + +// MockStakingKeeper is a mock of StakingKeeper interface. +type MockStakingKeeper struct { + ctrl *gomock.Controller + recorder *MockStakingKeeperMockRecorder +} + +// MockStakingKeeperMockRecorder is the mock recorder for MockStakingKeeper. +type MockStakingKeeperMockRecorder struct { + mock *MockStakingKeeper +} + +// NewMockStakingKeeper creates a new mock instance. +func NewMockStakingKeeper(ctrl *gomock.Controller) *MockStakingKeeper { + mock := &MockStakingKeeper{ctrl: ctrl} + mock.recorder = &MockStakingKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockStakingKeeper) EXPECT() *MockStakingKeeperMockRecorder { + return m.recorder +} + +// UnbondingTime mocks base method. 
+func (m *MockStakingKeeper) UnbondingTime(arg0 types1.Context) time.Duration { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UnbondingTime", arg0) + ret0, _ := ret[0].(time.Duration) + return ret0 +} + +// UnbondingTime indicates an expected call of UnbondingTime. +func (mr *MockStakingKeeperMockRecorder) UnbondingTime(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UnbondingTime", reflect.TypeOf((*MockStakingKeeper)(nil).UnbondingTime), arg0) +} + +// MockOperatorKeeper is a mock of OperatorKeeper interface. +type MockOperatorKeeper struct { + ctrl *gomock.Controller + recorder *MockOperatorKeeperMockRecorder +} + +// MockOperatorKeeperMockRecorder is the mock recorder for MockOperatorKeeper. +type MockOperatorKeeperMockRecorder struct { + mock *MockOperatorKeeper +} + +// NewMockOperatorKeeper creates a new mock instance. +func NewMockOperatorKeeper(ctrl *gomock.Controller) *MockOperatorKeeper { + mock := &MockOperatorKeeper{ctrl: ctrl} + mock.recorder = &MockOperatorKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockOperatorKeeper) EXPECT() *MockOperatorKeeperMockRecorder { + return m.recorder +} + +// ApplySlashForHeight mocks base method. +func (m *MockOperatorKeeper) ApplySlashForHeight(ctx types1.Context, operatorAccAddress types1.AccAddress, avsAddress string, height uint64, fraction types1.Dec, infraction types2.Infraction, jailDuration time.Duration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ApplySlashForHeight", ctx, operatorAccAddress, avsAddress, height, fraction, infraction, jailDuration) + ret0, _ := ret[0].(error) + return ret0 +} + +// ApplySlashForHeight indicates an expected call of ApplySlashForHeight. +func (mr *MockOperatorKeeperMockRecorder) ApplySlashForHeight(ctx, operatorAccAddress, avsAddress, height, fraction, infraction, jailDuration any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ApplySlashForHeight", reflect.TypeOf((*MockOperatorKeeper)(nil).ApplySlashForHeight), ctx, operatorAccAddress, avsAddress, height, fraction, infraction, jailDuration) +} + +// DeleteOperatorAddressForChainIDAndConsAddr mocks base method. +func (m *MockOperatorKeeper) DeleteOperatorAddressForChainIDAndConsAddr(ctx types1.Context, chainID string, consAddr types1.ConsAddress) { + m.ctrl.T.Helper() + m.ctrl.Call(m, "DeleteOperatorAddressForChainIDAndConsAddr", ctx, chainID, consAddr) +} + +// DeleteOperatorAddressForChainIDAndConsAddr indicates an expected call of DeleteOperatorAddressForChainIDAndConsAddr. +func (mr *MockOperatorKeeperMockRecorder) DeleteOperatorAddressForChainIDAndConsAddr(ctx, chainID, consAddr any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteOperatorAddressForChainIDAndConsAddr", reflect.TypeOf((*MockOperatorKeeper)(nil).DeleteOperatorAddressForChainIDAndConsAddr), ctx, chainID, consAddr) +} + +// GetActiveOperatorsForChainID mocks base method. +func (m *MockOperatorKeeper) GetActiveOperatorsForChainID(arg0 types1.Context, arg1 string) ([]types1.AccAddress, []keys.WrappedConsKey) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetActiveOperatorsForChainID", arg0, arg1) + ret0, _ := ret[0].([]types1.AccAddress) + ret1, _ := ret[1].([]keys.WrappedConsKey) + return ret0, ret1 +} + +// GetActiveOperatorsForChainID indicates an expected call of GetActiveOperatorsForChainID. 
+func (mr *MockOperatorKeeperMockRecorder) GetActiveOperatorsForChainID(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetActiveOperatorsForChainID", reflect.TypeOf((*MockOperatorKeeper)(nil).GetActiveOperatorsForChainID), arg0, arg1) +} + +// GetChainIDsForOperator mocks base method. +func (m *MockOperatorKeeper) GetChainIDsForOperator(arg0 types1.Context, arg1 string) ([]string, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetChainIDsForOperator", arg0, arg1) + ret0, _ := ret[0].([]string) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetChainIDsForOperator indicates an expected call of GetChainIDsForOperator. +func (mr *MockOperatorKeeperMockRecorder) GetChainIDsForOperator(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetChainIDsForOperator", reflect.TypeOf((*MockOperatorKeeper)(nil).GetChainIDsForOperator), arg0, arg1) +} + +// GetOperatorAddressForChainIDAndConsAddr mocks base method. +func (m *MockOperatorKeeper) GetOperatorAddressForChainIDAndConsAddr(arg0 types1.Context, arg1 string, arg2 types1.ConsAddress) (bool, types1.AccAddress) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetOperatorAddressForChainIDAndConsAddr", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(types1.AccAddress) + return ret0, ret1 +} + +// GetOperatorAddressForChainIDAndConsAddr indicates an expected call of GetOperatorAddressForChainIDAndConsAddr. +func (mr *MockOperatorKeeperMockRecorder) GetOperatorAddressForChainIDAndConsAddr(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOperatorAddressForChainIDAndConsAddr", reflect.TypeOf((*MockOperatorKeeper)(nil).GetOperatorAddressForChainIDAndConsAddr), arg0, arg1, arg2) +} + +// GetOperatorConsKeyForChainID mocks base method. +func (m *MockOperatorKeeper) GetOperatorConsKeyForChainID(arg0 types1.Context, arg1 types1.AccAddress, arg2 string) (bool, keys.WrappedConsKey, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetOperatorConsKeyForChainID", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + ret1, _ := ret[1].(keys.WrappedConsKey) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +// GetOperatorConsKeyForChainID indicates an expected call of GetOperatorConsKeyForChainID. +func (mr *MockOperatorKeeperMockRecorder) GetOperatorConsKeyForChainID(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetOperatorConsKeyForChainID", reflect.TypeOf((*MockOperatorKeeper)(nil).GetOperatorConsKeyForChainID), arg0, arg1, arg2) +} + +// GetVotePowerForChainID mocks base method. +func (m *MockOperatorKeeper) GetVotePowerForChainID(arg0 types1.Context, arg1 []types1.AccAddress, arg2 string) ([]int64, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetVotePowerForChainID", arg0, arg1, arg2) + ret0, _ := ret[0].([]int64) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetVotePowerForChainID indicates an expected call of GetVotePowerForChainID. +func (mr *MockOperatorKeeperMockRecorder) GetVotePowerForChainID(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetVotePowerForChainID", reflect.TypeOf((*MockOperatorKeeper)(nil).GetVotePowerForChainID), arg0, arg1, arg2) +} + +// IsOperatorRemovingKeyFromChainID mocks base method. 
+func (m *MockOperatorKeeper) IsOperatorRemovingKeyFromChainID(arg0 types1.Context, arg1 types1.AccAddress, arg2 string) bool { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IsOperatorRemovingKeyFromChainID", arg0, arg1, arg2) + ret0, _ := ret[0].(bool) + return ret0 +} + +// IsOperatorRemovingKeyFromChainID indicates an expected call of IsOperatorRemovingKeyFromChainID. +func (mr *MockOperatorKeeperMockRecorder) IsOperatorRemovingKeyFromChainID(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsOperatorRemovingKeyFromChainID", reflect.TypeOf((*MockOperatorKeeper)(nil).IsOperatorRemovingKeyFromChainID), arg0, arg1, arg2) +} + +// MockDelegationKeeper is a mock of DelegationKeeper interface. +type MockDelegationKeeper struct { + ctrl *gomock.Controller + recorder *MockDelegationKeeperMockRecorder +} + +// MockDelegationKeeperMockRecorder is the mock recorder for MockDelegationKeeper. +type MockDelegationKeeperMockRecorder struct { + mock *MockDelegationKeeper +} + +// NewMockDelegationKeeper creates a new mock instance. +func NewMockDelegationKeeper(ctrl *gomock.Controller) *MockDelegationKeeper { + mock := &MockDelegationKeeper{ctrl: ctrl} + mock.recorder = &MockDelegationKeeperMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockDelegationKeeper) EXPECT() *MockDelegationKeeperMockRecorder { + return m.recorder +} + +// DecrementUndelegationHoldCount mocks base method. +func (m *MockDelegationKeeper) DecrementUndelegationHoldCount(arg0 types1.Context, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DecrementUndelegationHoldCount", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DecrementUndelegationHoldCount indicates an expected call of DecrementUndelegationHoldCount. +func (mr *MockDelegationKeeperMockRecorder) DecrementUndelegationHoldCount(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DecrementUndelegationHoldCount", reflect.TypeOf((*MockDelegationKeeper)(nil).DecrementUndelegationHoldCount), arg0, arg1) +} + +// IncrementUndelegationHoldCount mocks base method. +func (m *MockDelegationKeeper) IncrementUndelegationHoldCount(arg0 types1.Context, arg1 []byte) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "IncrementUndelegationHoldCount", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// IncrementUndelegationHoldCount indicates an expected call of IncrementUndelegationHoldCount. 
+func (mr *MockDelegationKeeperMockRecorder) IncrementUndelegationHoldCount(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncrementUndelegationHoldCount", reflect.TypeOf((*MockDelegationKeeper)(nil).IncrementUndelegationHoldCount), arg0, arg1) +} diff --git a/x/appchain/coordinator/types/keys.go b/x/appchain/coordinator/types/keys.go index 16565e190..6ffbb1cd3 100644 --- a/x/appchain/coordinator/types/keys.go +++ b/x/appchain/coordinator/types/keys.go @@ -1,6 +1,10 @@ package types import ( + "bytes" + fmt "fmt" + + "github.com/ExocoreNetwork/exocore/utils" epochstypes "github.com/ExocoreNetwork/exocore/x/epochs/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -47,16 +51,39 @@ const ( SubscriberGenesisBytePrefix // InitTimeoutBytePrefix is the prefix for the init timeout key InitTimeoutBytePrefix + // PortBytePrefix is the prefix for the port key + PortBytePrefix + // ChannelForChainBytePrefix is the prefix for the channel for chain key + ChannelForChainBytePrefix + // ChainForChannelBytePrefix is the prefix for the chain for channel key + ChainForChannelBytePrefix + // ChainInitTimeoutBytePrefix is the prefix for the chain init timeout key + ChainInitTimeoutBytePrefix + // InitChainHeightBytePrefix is the prefix for the init chain height key + InitChainHeightBytePrefix + // HeightToChainVscIDBytePrefix is the prefix for the height to chain id + vsc id key + HeightToChainVscIDBytePrefix + // SlashAcksBytePrefix is the prefix for the slashing acks key + SlashAcksBytePrefix + // SubscriberValidatorBytePrefix is the prefix for the subscriber validator key + SubscriberValidatorBytePrefix + // MaxValidatorsBytePrefix is the prefix for the max validators key + MaxValidatorsBytePrefix + // VscIDForChainBytePrefix is the prefix to go from chainID to vscID + VscIDForChainBytePrefix + // ChainIDToVscPacketsBytePrefix is the prefix for the vsc packets key for a chainID + ChainIDToVscPacketsBytePrefix + // VscTimeoutBytePrefix is the prefix for the vsc timeout key + VscTimeoutBytePrefix + // ConsKeysToPruneBytePrefix is the prefix for the consensus keys to prune key + ConsKeysToPruneBytePrefix + // MaturityVscIDForChainIDConsAddrBytePrefix is the prefix for the vsc id for chain cons addr key + MaturityVscIDForChainIDConsAddrBytePrefix + // UndelegationsToReleaseBytePrefix is the prefix for the undelegations to release key + UndelegationsToReleaseBytePrefix ) -// AppendMany appends a variable number of byte slices together -func AppendMany(byteses ...[]byte) (out []byte) { - for _, bytes := range byteses { - out = append(out, bytes...) - } - return out -} - +// ParamsKey returns the key under which the coordinator module's parameters are stored. func ParamsKey() []byte { return []byte{ParamsBytePrefix} } @@ -65,7 +92,7 @@ func ParamsKey() []byte { // to begin with the starting of the epoch with identifier and number. Since the data // is stored alphabetically, this key structure is apt. func PendingSubscriberChainKey(epochIdentifier string, epochNumber uint64) []byte { - return AppendMany( + return utils.AppendMany( []byte{PendingSubscriberChainBytePrefix}, []byte(epochIdentifier), sdk.Uint64ToBigEndian(epochNumber), @@ -103,9 +130,137 @@ func SubscriberGenesisKey(chainID string) []byte { // InitTimeoutEpochKey returns the key under which the list of chains which will timeout (if not // initialized by then) at the beginning of the epoch is stored. 
func InitTimeoutEpochKey(epoch epochstypes.Epoch) []byte { - return AppendMany( - []byte{InitTimeoutBytePrefix}, - []byte(epoch.EpochIdentifier), - sdk.Uint64ToBigEndian(epoch.EpochNumber), + return utils.AppendMany( // safe to do, since... + []byte{InitTimeoutBytePrefix}, // size 1 + []byte(epoch.EpochIdentifier), // size unknown + sdk.Uint64ToBigEndian(epoch.EpochNumber), // size 8 + ) +} + +// PortKey returns the key for the port (hello Harry Potter!) +func PortKey() []byte { + return []byte{PortBytePrefix} +} + +// ChannelForChainKey returns the key under which the ibc channel id +// for the given chainId is stored. +func ChannelForChainKey(chainID string) []byte { + return append([]byte{ChannelForChainBytePrefix}, []byte(chainID)...) +} + +// ChainForChannelKey returns the key under which the chainId +// for the given channelId is stored. +func ChainForChannelKey(channelID string) []byte { + return append([]byte{ChainForChannelBytePrefix}, []byte(channelID)...) +} + +// ChainInitTimeoutKey returns the key for the chain init timeout +func ChainInitTimeoutKey(chainID string) []byte { + return append([]byte{ChainInitTimeoutBytePrefix}, []byte(chainID)...) +} + +// InitChainHeightKey returns the key for the init chain height +func InitChainHeightKey(chainID string) []byte { + return append([]byte{InitChainHeightBytePrefix}, []byte(chainID)...) +} + +// HeightToChainVscIDKey returns the key for the height to chain id + vsc id +func HeightToChainVscIDKey(chainID string, vscID uint64) []byte { + return utils.AppendMany( + []byte{HeightToChainVscIDBytePrefix}, + []byte(chainID), + sdk.Uint64ToBigEndian(vscID), + ) +} + +// SlashAcksKey returns the key for the slashing acks +func SlashAcksKey(chainID string) []byte { + return append( + []byte{SlashAcksBytePrefix}, + []byte(chainID)..., + ) +} + +// SubscriberValidatorKey returns the key for the subscriber validator +// It is used to store the validator object for the subscriber chain, indexed by +// prefix + len(chainID) + chainID + consensusAddr +func SubscriberValidatorKey(chainID string, consensusAddr []byte) []byte { + return utils.AppendMany( + []byte{SubscriberValidatorBytePrefix}, + utils.ChainIDWithLenKey(chainID), + consensusAddr, + ) +} + +// MaxValidatorsKey returns the key for the max validators +func MaxValidatorsKey(chainID string) []byte { + return append([]byte{MaxValidatorsBytePrefix}, []byte(chainID)...) +} + +// VscIDForChainKey returns the key for the vsc id to chain +func VscIDForChainKey(chainID string) []byte { + return append([]byte{VscIDForChainBytePrefix}, []byte(chainID)...) +} + +// ChainIDToVscPacketsKey returns the key for the vsc packets for a chain +func ChainIDToVscPacketsKey(chainID string) []byte { + return append([]byte{ChainIDToVscPacketsBytePrefix}, []byte(chainID)...) 
+} + +// VscTimeoutKey returns the key for the vsc timeout +func VscTimeoutKey(chainID string, vscID uint64) []byte { + return utils.AppendMany( + []byte{VscTimeoutBytePrefix}, + []byte(chainID), + sdk.Uint64ToBigEndian(vscID), + ) +} + +// ParseVscTimeoutKey parses the chainID and vscID from the key of the format +// prefix + chainID + vscID +func ParseVscTimeoutKey(bz []byte) (chainID string, vscID uint64, err error) { + return ParseChainIDAndUintIDKey(VscTimeoutBytePrefix, bz) +} + +// ParseChainIDAndUintIDKey returns the chain ID and uint ID for a ChainIdAndUintId key +func ParseChainIDAndUintIDKey(prefix byte, bz []byte) (string, uint64, error) { + expectedPrefix := []byte{prefix} + prefixL := len(expectedPrefix) + if len(bz) < prefixL+8 { // for uint64 + return "", 0, fmt.Errorf("invalid key length; expected at least %d bytes, got: %d", prefixL+8, len(bz)) + } + if prefix := bz[:prefixL]; !bytes.Equal(prefix, expectedPrefix) { + return "", 0, fmt.Errorf("invalid prefix; expected: %X, got: %X", expectedPrefix, prefix) + } + uintID := sdk.BigEndianToUint64(bz[len(bz)-8:]) + chainID := string(bz[prefixL : len(bz)-8]) + return chainID, uintID, nil +} + +// ConsAddrsToPruneKey returns the key for the consensus keys to prune, indexed by the +// chainID + vscID as the key. +func ConsAddrsToPruneKey(chainID string, vscID uint64) []byte { + return utils.AppendMany( + []byte{ConsKeysToPruneBytePrefix}, + []byte(chainID), + sdk.Uint64ToBigEndian(vscID), + ) +} + +// MaturityVscIDForChainIDConsAddrKey returns the key for the vsc id for chain cons addr +func MaturityVscIDForChainIDConsAddrKey(chainID string, consAddr sdk.ConsAddress) []byte { + return utils.AppendMany( + []byte{MaturityVscIDForChainIDConsAddrBytePrefix}, + []byte(chainID), + consAddr.Bytes(), + ) +} + +// UndelegationsToReleaseKey returns the key for the undelegations to release +func UndelegationsToReleaseKey(chainID string, vscID uint64) []byte { + return utils.AppendMany( + []byte{UndelegationsToReleaseBytePrefix}, + []byte(chainID), + sdk.Uint64ToBigEndian(vscID), ) } diff --git a/x/appchain/subscriber/keeper/abci.go b/x/appchain/subscriber/keeper/abci.go deleted file mode 100644 index cbc3ac339..000000000 --- a/x/appchain/subscriber/keeper/abci.go +++ /dev/null @@ -1,12 +0,0 @@ -package keeper - -import ( - abci "github.com/cometbft/cometbft/abci/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -func (k Keeper) BeginBlock(sdk.Context) {} - -func (k Keeper) EndBlock(sdk.Context) []abci.ValidatorUpdate { - return []abci.ValidatorUpdate{} -} diff --git a/x/appchain/subscriber/keeper/connection.go b/x/appchain/subscriber/keeper/connection.go new file mode 100644 index 000000000..4bae8dfec --- /dev/null +++ b/x/appchain/subscriber/keeper/connection.go @@ -0,0 +1,86 @@ +package keeper + +import ( + errorsmod "cosmossdk.io/errors" + types "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + conntypes "github.com/cosmos/ibc-go/v7/modules/core/03-connection/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" +) + +// SetCoordinatorClientID sets the clientID of the coordinator chain +func (k Keeper) SetCoordinatorClientID(ctx sdk.Context, clientID string) { + store := ctx.KVStore(k.storeKey) + store.Set(types.CoordinatorClientIDKey(), []byte(clientID)) +} + +// GetCoordinatorClientID gets the clientID of the coordinator chain +func (k Keeper) GetCoordinatorClientID(ctx 
sdk.Context) (string, bool) { + store := ctx.KVStore(k.storeKey) + key := types.CoordinatorClientIDKey() + if !store.Has(key) { + return "", false + } + bz := store.Get(key) + return string(bz), true +} + +// SetCoordinatorChannel sets the channelId for the channel to the coordinator. +func (k Keeper) SetCoordinatorChannel(ctx sdk.Context, channelID string) { + store := ctx.KVStore(k.storeKey) + store.Set(types.CoordinatorChannelKey(), []byte(channelID)) +} + +// GetCoordinatorChannel gets the channelId for the channel to the coordinator. +func (k Keeper) GetCoordinatorChannel(ctx sdk.Context) (string, bool) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(types.CoordinatorChannelKey()) + if len(bz) == 0 { + return "", false + } + return string(bz), true +} + +// DeleteCoordinatorChannel deletes the channelId for the channel to the coordinator. +func (k Keeper) DeleteCoordinatorChannel(ctx sdk.Context) { + store := ctx.KVStore(k.storeKey) + store.Delete(types.CoordinatorChannelKey()) +} + +// VerifyCoordinatorChain verifies the chain trying to connect on the channel handshake. +func (k Keeper) VerifyCoordinatorChain(ctx sdk.Context, connectionHops []string) error { + if len(connectionHops) != 1 { + return channeltypes.ErrTooManyConnectionHops.Wrapf( + "must have direct connection to coordinator chain, found %d hops", + len(connectionHops), + ) + } + connectionID := connectionHops[0] + conn, ok := k.connectionKeeper.GetConnection(ctx, connectionID) + if !ok { + return errorsmod.Wrapf( + conntypes.ErrConnectionNotFound, + "connection not found for connection Id: %s", + connectionID, + ) + } + // Verify that client id is expected clientId + expectedClientID, ok := k.GetCoordinatorClientID(ctx) + if !ok { + return errorsmod.Wrapf( + clienttypes.ErrInvalidClient, + "could not find coordinator client id", + ) + } + if expectedClientID != conn.ClientId { + return errorsmod.Wrapf( + clienttypes.ErrInvalidClient, + "invalid client: %s, channel must be built on top of client: %s", + conn.ClientId, + expectedClientID, + ) + } + + return nil +} diff --git a/x/appchain/subscriber/keeper/connection_test.go b/x/appchain/subscriber/keeper/connection_test.go new file mode 100644 index 000000000..01c6a4461 --- /dev/null +++ b/x/appchain/subscriber/keeper/connection_test.go @@ -0,0 +1,79 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + keepertest "github.com/ExocoreNetwork/exocore/testutil/keeper" + conntypes "github.com/cosmos/ibc-go/v7/modules/core/03-connection/types" +) + +func TestSetGetCoordinatorClientID(t *testing.T) { + keeper, ctx, _ := keepertest.NewSubscriberKeeper(t) + + clientID := "07-tendermint-0" + keeper.SetCoordinatorClientID(ctx, clientID) + + retrievedID, found := keeper.GetCoordinatorClientID(ctx) + require.True(t, found) + require.Equal(t, clientID, retrievedID) +} + +func TestSetGetDeleteCoordinatorChannel(t *testing.T) { + keeper, ctx, _ := keepertest.NewSubscriberKeeper(t) + + channelID := "channel-0" + keeper.SetCoordinatorChannel(ctx, channelID) + + retrievedID, found := keeper.GetCoordinatorChannel(ctx) + require.True(t, found) + require.Equal(t, channelID, retrievedID) + + keeper.DeleteCoordinatorChannel(ctx) + + _, found = keeper.GetCoordinatorChannel(ctx) + require.False(t, found) +} + +func TestVerifyCoordinatorChain(t *testing.T) { + keeper, ctx, mocks := keepertest.NewSubscriberKeeper(t) + + clientID := "07-tendermint-0" + connectionID := "connection-0" + keeper.SetCoordinatorClientID(ctx, clientID) + + // Test with valid 
connection + mocks.ConnectionKeeper.EXPECT().GetConnection(ctx, connectionID).Return( + conntypes.ConnectionEnd{ClientId: clientID}, + true, + ) + + err := keeper.VerifyCoordinatorChain(ctx, []string{connectionID}) + require.NoError(t, err) + + // Test with too many connection hops + err = keeper.VerifyCoordinatorChain(ctx, []string{connectionID, "connection-1"}) + require.Error(t, err) + require.Contains(t, err.Error(), "must have direct connection to coordinator chain") + + // Test with non-existent connection + mocks.ConnectionKeeper.EXPECT().GetConnection(ctx, "non-existent").Return( + conntypes.ConnectionEnd{}, + false, + ) + + err = keeper.VerifyCoordinatorChain(ctx, []string{"non-existent"}) + require.Error(t, err) + require.Contains(t, err.Error(), "connection not found") + + // Test with mismatched client ID + mocks.ConnectionKeeper.EXPECT().GetConnection(ctx, connectionID).Return( + conntypes.ConnectionEnd{ClientId: "wrong-client-id"}, + true, + ) + + err = keeper.VerifyCoordinatorChain(ctx, []string{connectionID}) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid client") +} diff --git a/x/appchain/subscriber/keeper/distribution.go b/x/appchain/subscriber/keeper/distribution.go new file mode 100644 index 000000000..ca32aabf8 --- /dev/null +++ b/x/appchain/subscriber/keeper/distribution.go @@ -0,0 +1,29 @@ +package keeper + +import ( + errorsmod "cosmossdk.io/errors" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + sdk "github.com/cosmos/cosmos-sdk/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" +) + +func (k Keeper) ChannelOpenInit(ctx sdk.Context, msg *channeltypes.MsgChannelOpenInit) ( + *channeltypes.MsgChannelOpenInitResponse, error, +) { + return k.ibcCoreKeeper.ChannelOpenInit(sdk.WrapSDKContext(ctx), msg) +} + +func (k Keeper) TransferChannelExists(ctx sdk.Context, channelID string) bool { + _, found := k.channelKeeper.GetChannel(ctx, transfertypes.PortID, channelID) + return found +} + +func (k Keeper) GetConnectionHops(ctx sdk.Context, srcPort, srcChan string) ([]string, error) { + ch, found := k.channelKeeper.GetChannel(ctx, srcPort, srcChan) + if !found { + return []string{}, errorsmod.Wrapf(commontypes.ErrChannelNotFound, + "cannot get connection hops from non-existent channel") + } + return ch.ConnectionHops, nil +} diff --git a/x/appchain/subscriber/keeper/distribution_test.go b/x/appchain/subscriber/keeper/distribution_test.go new file mode 100644 index 000000000..d05c80a61 --- /dev/null +++ b/x/appchain/subscriber/keeper/distribution_test.go @@ -0,0 +1,125 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + "github.com/ExocoreNetwork/exocore/testutil/keeper" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" +) + +func TestKeeper_ChannelOpenInit(t *testing.T) { + k, ctx, mocks := keeper.NewSubscriberKeeper(t) + + msg := &channeltypes.MsgChannelOpenInit{ + PortId: "test-port", + Channel: channeltypes.Channel{ + State: channeltypes.INIT, + Ordering: channeltypes.UNORDERED, + Counterparty: channeltypes.Counterparty{ + PortId: "counterparty-port", + ChannelId: "", + }, + ConnectionHops: []string{"connection-0"}, + Version: "1", + }, + } + + expectedResponse := &channeltypes.MsgChannelOpenInitResponse{} + + 
mocks.IBCCoreKeeper.EXPECT(). + ChannelOpenInit(gomock.Any(), msg). + Return(expectedResponse, nil) + + response, err := k.ChannelOpenInit(ctx, msg) + + require.NoError(t, err) + assert.Equal(t, expectedResponse, response) +} + +func TestKeeper_TransferChannelExists(t *testing.T) { + k, ctx, mocks := keeper.NewSubscriberKeeper(t) + + testCases := []struct { + name string + channelID string + exists bool + }{ + { + name: "Existing channel", + channelID: "channel-0", + exists: true, + }, + { + name: "Non-existing channel", + channelID: "channel-1", + exists: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + mocks.ChannelKeeper.EXPECT(). + GetChannel(ctx, transfertypes.PortID, tc.channelID). + Return(channeltypes.Channel{}, tc.exists) + + exists := k.TransferChannelExists(ctx, tc.channelID) + assert.Equal(t, tc.exists, exists) + }) + } +} + +func TestKeeper_GetConnectionHops(t *testing.T) { + k, ctx, mocks := keeper.NewSubscriberKeeper(t) + + testCases := []struct { + name string + srcPort string + srcChan string + expectedHops []string + expectedError bool + }{ + { + name: "Existing channel", + srcPort: "test-port", + srcChan: "channel-0", + expectedHops: []string{"connection-0"}, + expectedError: false, + }, + { + name: "Non-existing channel", + srcPort: "test-port", + srcChan: "channel-1", + expectedHops: []string{}, + expectedError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + if tc.expectedError { + mocks.ChannelKeeper.EXPECT(). + GetChannel(ctx, tc.srcPort, tc.srcChan). + Return(channeltypes.Channel{}, false) + } else { + mocks.ChannelKeeper.EXPECT(). + GetChannel(ctx, tc.srcPort, tc.srcChan). + Return(channeltypes.Channel{ConnectionHops: tc.expectedHops}, true) + } + + hops, err := k.GetConnectionHops(ctx, tc.srcPort, tc.srcChan) + + if tc.expectedError { + require.Error(t, err) + assert.Empty(t, hops) + } else { + require.NoError(t, err) + assert.Equal(t, tc.expectedHops, hops) + } + }) + } +} diff --git a/x/appchain/subscriber/keeper/genesis.go b/x/appchain/subscriber/keeper/genesis.go index 3e353f09e..88f85b2cc 100644 --- a/x/appchain/subscriber/keeper/genesis.go +++ b/x/appchain/subscriber/keeper/genesis.go @@ -1,18 +1,49 @@ package keeper import ( + "fmt" + + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" abci "github.com/cometbft/cometbft/abci/types" sdk "github.com/cosmos/cosmos-sdk/types" ) +// InitGenesis initializes the subscriber module's state from a genesis state. +// This state is typically obtained from a coordinator chain; however, it +// may be exported from a previous state of the subscriber chain. func (k Keeper) InitGenesis(ctx sdk.Context, gs types.GenesisState) []abci.ValidatorUpdate { - k.SetParams(ctx, gs.Params) - return []abci.ValidatorUpdate{} + // do not support switchover use case yet. + if ctx.BlockHeight() > 0 { + // this is not supported, not because of any technical limitations, + // but rather because the business logic and the security logic + // around switchover are not yet fully designed. 
+ panic("switchover use case not supported yet") + } + k.SetSubscriberParams(ctx, gs.Params) + k.SetPort(ctx, commontypes.SubscriberPortID) + // only bind to the port if the capability keeper hasn't done so already + if !k.IsBound(ctx, commontypes.SubscriberPortID) { + k.Logger(ctx).Info("binding port", "port", commontypes.SubscriberPortID) + if err := k.portKeeper.BindPort(ctx, commontypes.SubscriberPortID); err != nil { + panic(fmt.Sprintf("could not claim port capability: %v", err)) + } + } + // the client state and the consensus state are provided by the coordinator. + clientID, err := k.clientKeeper.CreateClient( + ctx, gs.Coordinator.ClientState, gs.Coordinator.ConsensusState, + ) + if err != nil { + panic(fmt.Sprintf("could not create client for coordinator chain: %v", err)) + } + k.SetCoordinatorClientID(ctx, clientID) + // TODO: in the case of switchover, this number may be a different value + k.SetValsetUpdateIDForHeight(ctx, ctx.BlockHeight(), types.FirstValsetUpdateID) + return k.ApplyValidatorChanges(ctx, gs.Coordinator.InitialValSet) } func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { return &types.GenesisState{ - Params: k.GetParams(ctx), + Params: k.GetSubscriberParams(ctx), } } diff --git a/x/appchain/subscriber/keeper/genesis_test.go b/x/appchain/subscriber/keeper/genesis_test.go new file mode 100644 index 000000000..e934ca262 --- /dev/null +++ b/x/appchain/subscriber/keeper/genesis_test.go @@ -0,0 +1,72 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + "github.com/ExocoreNetwork/exocore/testutil/keeper" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" + abci "github.com/cometbft/cometbft/abci/types" + _07_tendermint "github.com/cosmos/ibc-go/v7/modules/light-clients/07-tendermint" +) + +func TestInitGenesis(t *testing.T) { + k, ctx, mocks := keeper.NewSubscriberKeeper(t) + + // Mock the necessary function calls + mocks.ScopedKeeper.EXPECT().GetCapability(ctx, gomock.Any()).Return(nil, false) + mocks.PortKeeper.EXPECT().BindPort(ctx, commontypes.SubscriberPortID).Return(nil) + mocks.ClientKeeper.EXPECT().CreateClient(ctx, gomock.Any(), gomock.Any()).Return("test-client-id", nil) + + genesisState := types.GenesisState{ + Params: commontypes.DefaultSubscriberParams(), + Coordinator: commontypes.CoordinatorInfo{ + ClientState: &_07_tendermint.ClientState{}, + ConsensusState: &_07_tendermint.ConsensusState{}, + InitialValSet: []abci.ValidatorUpdate{}, + }, + } + + validatorUpdates := k.InitGenesis(ctx, genesisState) + + // Verify that the genesis state was properly initialized + require.Equal(t, genesisState.Params, k.GetSubscriberParams(ctx)) + require.Equal(t, commontypes.SubscriberPortID, k.GetPort(ctx)) + clientID, ok := k.GetCoordinatorClientID(ctx) + require.True(t, ok) + require.Equal(t, "test-client-id", clientID) + require.Equal(t, types.FirstValsetUpdateID, k.GetValsetUpdateIDForHeight(ctx, ctx.BlockHeight())) + require.NotNil(t, validatorUpdates) +} + +func TestExportGenesis(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + // Set up some state + k.SetSubscriberParams(ctx, commontypes.DefaultSubscriberParams()) + + genesisState := k.ExportGenesis(ctx) + + // Verify that the genesis state was properly exported + require.Equal(t, commontypes.DefaultSubscriberParams(), genesisState.Params) +} + +func TestInitGenesisNonZeroHeight(t *testing.T) { + k, ctx, _ := 
keeper.NewSubscriberKeeper(t) + + // Set a non-zero block height + header := ctx.BlockHeader() + header.Height = 10 + ctx = ctx.WithBlockHeader(header) + + genesisState := types.GenesisState{ + Params: commontypes.DefaultSubscriberParams(), + } + + require.Panics(t, func() { + k.InitGenesis(ctx, genesisState) + }, "InitGenesis should panic when block height is non-zero") +} diff --git a/x/appchain/subscriber/keeper/grpc_query.go b/x/appchain/subscriber/keeper/grpc_query.go index 70954845d..e0db3a20b 100644 --- a/x/appchain/subscriber/keeper/grpc_query.go +++ b/x/appchain/subscriber/keeper/grpc_query.go @@ -21,5 +21,5 @@ func (k Keeper) QueryParams( } ctx := sdk.UnwrapSDKContext(goCtx) - return &types.QueryParamsResponse{Params: k.GetParams(ctx)}, nil + return &types.QueryParamsResponse{Params: k.GetSubscriberParams(ctx)}, nil } diff --git a/x/appchain/subscriber/keeper/ibc_client.go b/x/appchain/subscriber/keeper/ibc_client.go new file mode 100644 index 000000000..06e515df7 --- /dev/null +++ b/x/appchain/subscriber/keeper/ibc_client.go @@ -0,0 +1,21 @@ +package keeper + +import ( + errorsmod "cosmossdk.io/errors" + sdk "github.com/cosmos/cosmos-sdk/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + host "github.com/cosmos/ibc-go/v7/modules/core/24-host" +) + +// ChanCloseInit defines a wrapper function for the channel Keeper's function. +func (k Keeper) ChanCloseInit(ctx sdk.Context, portID, channelID string) error { + capName := host.ChannelCapabilityPath(portID, channelID) + chanCap, ok := k.scopedKeeper.GetCapability(ctx, capName) + if !ok { + return errorsmod.Wrapf( + channeltypes.ErrChannelCapabilityNotFound, + "could not retrieve channel capability at: %s", capName, + ) + } + return k.channelKeeper.ChanCloseInit(ctx, portID, channelID, chanCap) +} diff --git a/x/appchain/subscriber/keeper/ibc_client_test.go b/x/appchain/subscriber/keeper/ibc_client_test.go new file mode 100644 index 000000000..cc5ec6656 --- /dev/null +++ b/x/appchain/subscriber/keeper/ibc_client_test.go @@ -0,0 +1,63 @@ +package keeper_test + +import ( + "testing" + + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + host "github.com/cosmos/ibc-go/v7/modules/core/24-host" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + keepertest "github.com/ExocoreNetwork/exocore/testutil/keeper" +) + +func TestKeeper_ChanCloseInit(t *testing.T) { + k, ctx, mocks := keepertest.NewSubscriberKeeper(t) + + testCases := []struct { + name string + portID string + channelID string + setup func() + expError bool + }{ + { + name: "success", + portID: "port-1", + channelID: "channel-1", + setup: func() { + capName := host.ChannelCapabilityPath("port-1", "channel-1") + capability := &capabilitytypes.Capability{ + Index: 1, + } + mocks.ScopedKeeper.EXPECT().GetCapability(gomock.Any(), capName).Return(capability, true) + mocks.ChannelKeeper.EXPECT().ChanCloseInit(gomock.Any(), "port-1", "channel-1", capability).Return(nil) + }, + expError: false, + }, + { + name: "capability not found", + portID: "port-2", + channelID: "channel-2", + setup: func() { + capName := host.ChannelCapabilityPath("port-2", "channel-2") + mocks.ScopedKeeper.EXPECT().GetCapability(gomock.Any(), capName).Return(nil, false) + }, + expError: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + tc.setup() + err := k.ChanCloseInit(ctx, tc.portID, tc.channelID) + if tc.expError { + 
require.Error(t, err) + require.ErrorIs(t, err, channeltypes.ErrChannelCapabilityNotFound) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/x/appchain/subscriber/keeper/impl_sdk.go b/x/appchain/subscriber/keeper/impl_sdk.go new file mode 100644 index 000000000..c563ccb9a --- /dev/null +++ b/x/appchain/subscriber/keeper/impl_sdk.go @@ -0,0 +1,165 @@ +package keeper + +import ( + "time" + + "cosmossdk.io/math" + abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" + evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types" + genutiltypes "github.com/cosmos/cosmos-sdk/x/genutil/types" + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" +) + +// This file contains the implementations of the Cosmos SDK level expected keepers +// for the subscriber's Keeper. This allows us to use the subscriber's keeper +// as an input into the slashing and the evidence modules. These modules then +// handle the slashing calls so that we do not have to implement them separately. +// Note that the subscriber chain is deemed to be trusted because the coordinator +// will not verify the evidence any further. An upgrade overcoming this has just +// been merged into interchain-security, which we can pick up later. +// https://github.com/orgs/cosmos/projects/28/views/11?pane=issue&itemId=21248976 + +// interface guards +var ( + _ slashingtypes.StakingKeeper = Keeper{} + _ evidencetypes.StakingKeeper = Keeper{} + _ clienttypes.StakingKeeper = Keeper{} + _ genutiltypes.StakingKeeper = Keeper{} +) + +// GetParams returns an empty staking params. It is used by the interfaces above, but the returned +// value is never examined. +func (k Keeper) GetParams(sdk.Context) stakingtypes.Params { + return stakingtypes.Params{} +} + +// This function is used by the slashing module to store the validator public keys into the +// state. These were previously verified in the evidence module but have since been removed. +func (k Keeper) IterateValidators(sdk.Context, + func(int64, stakingtypes.ValidatorI) bool, +) { + // no op +} + +// simply unimplemented because it is not needed +func (k Keeper) Validator(sdk.Context, sdk.ValAddress) stakingtypes.ValidatorI { + panic("unimplemented on this keeper") +} + +// ValidatorByConsAddr returns an empty validator +func (k Keeper) ValidatorByConsAddr(sdk.Context, sdk.ConsAddress) stakingtypes.ValidatorI { + /* + NOTE: + + The evidence module will call this function when it handles equivocation evidence. + The returned value must not be nil and must not have an UNBONDED validator status, + or evidence will reject it. + + Also, the slashing module will call this function when it observes downtime. In that case + the only requirement on the returned value is that it isn't null. + */ + return stakingtypes.Validator{} +} + +// Calls SlashWithInfractionReason with Infraction_INFRACTION_UNSPECIFIED. +// It should not be called anywhere. +func (k Keeper) Slash( + ctx sdk.Context, + addr sdk.ConsAddress, + infractionHeight, power int64, + slashFactor sdk.Dec, +) math.Int { + return k.SlashWithInfractionReason( + ctx, addr, infractionHeight, + power, slashFactor, + stakingtypes.Infraction_INFRACTION_UNSPECIFIED, + ) +} + +// SlashWithInfractionReason queues a slashing request for the coordinator chain. +// All queued slashing requests will be cleared in EndBlock. +// Called by slashing keeper. 
+func (k Keeper) SlashWithInfractionReason( + ctx sdk.Context, + addr sdk.ConsAddress, + infractionHeight, power int64, + _ sdk.Dec, + infraction stakingtypes.Infraction, +) math.Int { + if infraction == stakingtypes.Infraction_INFRACTION_UNSPECIFIED { + return math.ZeroInt() + } + + // get VSC ID for infraction height + vscID := k.GetValsetUpdateIDForHeight(ctx, infractionHeight) + + k.Logger(ctx).Debug( + "vscID obtained from mapped infraction height", + "infraction height", infractionHeight, + "vscID", vscID, + ) + + // this is the most important step in the function + // everything else is just here to implement StakingKeeper interface + // IBC packets are created from slash data and sent to the coordinator during EndBlock + k.QueueSlashPacket( + ctx, + abci.Validator{ + Address: addr.Bytes(), + Power: power, + }, + vscID, infraction, + ) + + // Only return to comply with the interface restriction + return math.ZeroInt() +} + +// Unimplemented because jailing happens on the coordinator chain. +func (k Keeper) Jail(sdk.Context, sdk.ConsAddress) {} + +// Same as above. +func (k Keeper) Unjail(sdk.Context, sdk.ConsAddress) {} + +// Cannot delegate on this chain, and this should not be called by either the subscriber or the +// coordinator. +func (k Keeper) Delegation( + sdk.Context, + sdk.AccAddress, + sdk.ValAddress, +) stakingtypes.DelegationI { + panic("unimplemented on this keeper") +} + +// Unused by evidence and slashing. However, I have set it up to report the correct number +// anyway. Alternatively we could panic here as well. +func (k Keeper) MaxValidators(ctx sdk.Context) uint32 { + return k.GetParams(ctx).MaxValidators +} + +// In interchain-security, this does seem to have been implemented. However, I did not see +// the validators being persisted in the first place so I just returned an empty list. +// I also did not see this being used anywhere within the slashing module. 
+func (k Keeper) GetAllValidators(sdk.Context) []stakingtypes.Validator { + return []stakingtypes.Validator{} +} + +// IsValidatorJailed returns the outstanding slashing flag for the given validator address +func (k Keeper) IsValidatorJailed(ctx sdk.Context, addr sdk.ConsAddress) bool { + return k.HasOutstandingDowntime(ctx, addr) +} + +func (k Keeper) UnbondingTime(ctx sdk.Context) time.Duration { + return k.GetUnbondingPeriod(ctx) +} + +// implement interface method needed for x/genutil in sdk v47 +// returns empty updates and err +func (k Keeper) ApplyAndReturnValidatorSetUpdates( + sdk.Context, +) (updates []abci.ValidatorUpdate, err error) { + return +} diff --git a/x/appchain/subscriber/keeper/impl_sdk_test.go b/x/appchain/subscriber/keeper/impl_sdk_test.go new file mode 100644 index 000000000..d729834c9 --- /dev/null +++ b/x/appchain/subscriber/keeper/impl_sdk_test.go @@ -0,0 +1,110 @@ +package keeper_test + +import ( + "testing" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/ExocoreNetwork/exocore/testutil/keeper" +) + +func TestKeeper_GetParams(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + params := k.GetParams(ctx) + assert.Equal(t, stakingtypes.Params{}, params) +} + +func TestKeeper_IterateValidators(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + // This should be a no-op, so we're just checking that it doesn't panic + k.IterateValidators(ctx, func(int64, stakingtypes.ValidatorI) bool { + return false + }) +} + +func TestKeeper_ValidatorByConsAddr(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + consAddr := sdk.ConsAddress([]byte("test")) + validator := k.ValidatorByConsAddr(ctx, consAddr) + + assert.NotNil(t, validator) + assert.NotEqual(t, stakingtypes.Unbonded, validator.GetStatus()) +} + +func TestKeeper_SlashWithInfractionReason(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + consAddr := sdk.ConsAddress([]byte("test")) + infractionHeight := int64(100) + power := int64(1000) + slashFactor := sdk.NewDec(1) + + // Test with INFRACTION_UNSPECIFIED + result := k.SlashWithInfractionReason(ctx, consAddr, infractionHeight, power, slashFactor, stakingtypes.Infraction_INFRACTION_UNSPECIFIED) + assert.Equal(t, sdk.ZeroInt(), result) + + // Test with a valid infraction + // Note: This test assumes that QueueSlashPacket is implemented correctly + result = k.SlashWithInfractionReason(ctx, consAddr, infractionHeight, power, slashFactor, stakingtypes.Infraction_INFRACTION_DOUBLE_SIGN) + assert.Equal(t, sdk.ZeroInt(), result) +} + +func TestKeeper_MaxValidators(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + maxValidators := k.MaxValidators(ctx) + assert.Equal(t, uint32(0), maxValidators) // Assuming default is 0 +} + +func TestKeeper_GetAllValidators(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + validators := k.GetAllValidators(ctx) + assert.Empty(t, validators) +} + +func TestKeeper_IsValidatorJailed(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + consAddr := sdk.ConsAddress([]byte("test")) + + // Assuming HasOutstandingDowntime is implemented + isJailed := k.IsValidatorJailed(ctx, consAddr) + assert.False(t, isJailed) // Assuming default is false +} + +func TestKeeper_UnbondingTime(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + // Assuming GetUnbondingPeriod is implemented + unbondingTime := 
k.UnbondingTime(ctx) + assert.Equal(t, time.Duration(0), unbondingTime) // Assuming default is 0 +} + +func TestKeeper_ApplyAndReturnValidatorSetUpdates(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + updates, err := k.ApplyAndReturnValidatorSetUpdates(ctx) + require.NoError(t, err) + assert.Empty(t, updates) +} + +// TestKeeper_Panics tests the methods that are expected to panic +func TestKeeper_Panics(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + assert.Panics(t, func() { + k.Validator(ctx, sdk.ValAddress{}) + }) + + assert.Panics(t, func() { + k.Delegation(ctx, sdk.AccAddress{}, sdk.ValAddress{}) + }) +} diff --git a/x/appchain/subscriber/keeper/keeper.go b/x/appchain/subscriber/keeper/keeper.go index 9a73b1fba..d7fc833f3 100644 --- a/x/appchain/subscriber/keeper/keeper.go +++ b/x/appchain/subscriber/keeper/keeper.go @@ -1,19 +1,202 @@ package keeper import ( + "fmt" + "time" + + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" + "github.com/cometbft/cometbft/libs/log" "github.com/cosmos/cosmos-sdk/codec" storetypes "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + host "github.com/cosmos/ibc-go/v7/modules/core/24-host" ) type Keeper struct { - cdc codec.BinaryCodec - storeKey storetypes.StoreKey + cdc codec.BinaryCodec + storeKey storetypes.StoreKey + accountKeeper commontypes.AccountKeeper + bankKeeper commontypes.BankKeeper + scopedKeeper commontypes.ScopedKeeper + portKeeper commontypes.PortKeeper + clientKeeper commontypes.ClientKeeper + connectionKeeper commontypes.ConnectionKeeper + channelKeeper commontypes.ChannelKeeper + ibcCoreKeeper commontypes.IBCCoreKeeper + ibcTransferKeeper commontypes.IBCTransferKeeper + feeCollectorName string } -// NewKeeper creates a new coordinator keeper. -func NewKeeper(cdc codec.BinaryCodec, storeKey storetypes.StoreKey) Keeper { +// NewKeeper creates a new subscriber keeper. +func NewKeeper( + cdc codec.BinaryCodec, + storeKey storetypes.StoreKey, + accountKeeper commontypes.AccountKeeper, + bankKeeper commontypes.BankKeeper, + scopedKeeper commontypes.ScopedKeeper, + portKeeper commontypes.PortKeeper, + clientKeeper commontypes.ClientKeeper, + connectionKeeper commontypes.ConnectionKeeper, + channelKeeper commontypes.ChannelKeeper, + ibcCoreKeeper commontypes.IBCCoreKeeper, + ibcTransferKeeper commontypes.IBCTransferKeeper, + feeCollectorName string, +) Keeper { return Keeper{ - cdc: cdc, - storeKey: storeKey, + cdc: cdc, + storeKey: storeKey, + accountKeeper: accountKeeper, + bankKeeper: bankKeeper, + scopedKeeper: scopedKeeper, + portKeeper: portKeeper, + clientKeeper: clientKeeper, + connectionKeeper: connectionKeeper, + channelKeeper: channelKeeper, + ibcCoreKeeper: ibcCoreKeeper, + ibcTransferKeeper: ibcTransferKeeper, + feeCollectorName: feeCollectorName, + } +} + +// Logger returns a module-specific logger. +func (k Keeper) Logger(ctx sdk.Context) log.Logger { + return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName)) +} + +// GetPort returns the portID for the IBC app module. Used in ExportGenesis +func (k Keeper) GetPort(ctx sdk.Context) string { + store := ctx.KVStore(k.storeKey) + return string(store.Get(types.PortKey())) +} + +// SetPort sets the portID for the IBC app module. 
Used in InitGenesis +func (k Keeper) SetPort(ctx sdk.Context, portID string) { + store := ctx.KVStore(k.storeKey) + store.Set(types.PortKey(), []byte(portID)) +} + +// IsBound checks if the IBC app module is already bound to the desired port +func (k Keeper) IsBound(ctx sdk.Context, portID string) bool { + _, ok := k.scopedKeeper.GetCapability(ctx, host.PortPath(portID)) + return ok +} + +// BindPort defines a wrapper function for the port Keeper's function in +// order to expose it to module's InitGenesis function +func (k Keeper) BindPort(ctx sdk.Context, portID string) error { + capability := k.portKeeper.BindPort(ctx, portID) + return k.ClaimCapability(ctx, capability, host.PortPath(portID)) +} + +// ClaimCapability allows the IBC app module to claim a capability that core IBC +// passes to it +func (k Keeper) ClaimCapability( + ctx sdk.Context, + cap *capabilitytypes.Capability, + name string, +) error { + return k.scopedKeeper.ClaimCapability(ctx, cap, name) +} + +// GetPendingChanges gets the pending validator set changes that will be applied +// at the end of this block. +func (k Keeper) GetPendingChanges( + ctx sdk.Context, +) *commontypes.ValidatorSetChangePacketData { + store := ctx.KVStore(k.storeKey) + bz := store.Get(types.PendingChangesKey()) + if bz == nil { + return nil } + res := &commontypes.ValidatorSetChangePacketData{} + k.cdc.MustUnmarshal(bz, res) + return res +} + +// SetPendingChanges sets the pending validator set changes that will be applied +// at the end of this block. +func (k Keeper) SetPendingChanges( + ctx sdk.Context, + data *commontypes.ValidatorSetChangePacketData, +) { + store := ctx.KVStore(k.storeKey) + bz := k.cdc.MustMarshal(data) + store.Set(types.PendingChangesKey(), bz) +} + +// DeletePendingChanges deletes the pending validator set changes that will be applied +// at the end of this block. 
+func (k Keeper) DeletePendingChanges(ctx sdk.Context) { + store := ctx.KVStore(k.storeKey) + store.Delete(types.PendingChangesKey()) +} + +// SetPacketMaturityTime sets the maturity time for a given received VSC packet id +func (k Keeper) SetPacketMaturityTime( + ctx sdk.Context, vscID uint64, maturityTime time.Time, +) { + store := ctx.KVStore(k.storeKey) + maturingVSCPacket := &types.MaturingVSCPacket{ + ValidatorSetChangeID: vscID, + MaturityTime: maturityTime, + } + store.Set( + types.PacketMaturityTimeKey(maturityTime, vscID), + k.cdc.MustMarshal(maturingVSCPacket), + ) +} + +// GetElapsedVscPackets returns all VSC packets that have matured as of the current block time +func (k Keeper) GetElapsedVscPackets(ctx sdk.Context) []types.MaturingVSCPacket { + store := ctx.KVStore(k.storeKey) + prefix := []byte{types.PacketMaturityTimeBytePrefix} + iterator := sdk.KVStorePrefixIterator(store, prefix) + defer iterator.Close() + + var ret []types.MaturingVSCPacket + for ; iterator.Valid(); iterator.Next() { + var packet types.MaturingVSCPacket + k.cdc.MustUnmarshal(iterator.Value(), &packet) + // since these are stored in order of maturity time, we can break early + if ctx.BlockTime().Before(packet.MaturityTime) { + break + } + ret = append(ret, packet) + } + return ret +} + +// DeletePacketMaturityTime deletes the maturity time for a given received VSC packet id +func (k Keeper) DeletePacketMaturityTime( + ctx sdk.Context, vscID uint64, maturityTime time.Time, +) { + store := ctx.KVStore(k.storeKey) + store.Delete(types.PacketMaturityTimeKey(maturityTime, vscID)) +} + +// DeleteOutstandingDowntime deletes the outstanding downtime flag for the given validator +// consensus address +func (k Keeper) DeleteOutstandingDowntime( + ctx sdk.Context, consAddress sdk.ConsAddress, +) { + store := ctx.KVStore(k.storeKey) + store.Delete(types.OutstandingDowntimeKey(consAddress)) +} + +// SetOutstandingDowntime sets the outstanding downtime flag for the given validator +func (k Keeper) SetOutstandingDowntime( + ctx sdk.Context, consAddress sdk.ConsAddress, +) { + store := ctx.KVStore(k.storeKey) + store.Set(types.OutstandingDowntimeKey(consAddress), []byte{1}) +} + +// HasOutstandingDowntime returns true if the given validator has an outstanding downtime +func (k Keeper) HasOutstandingDowntime( + ctx sdk.Context, consAddress sdk.ConsAddress, +) bool { + store := ctx.KVStore(k.storeKey) + return store.Has(types.OutstandingDowntimeKey(consAddress)) } diff --git a/x/appchain/subscriber/keeper/params.go b/x/appchain/subscriber/keeper/params.go index 9ccc5c5b7..65484717f 100644 --- a/x/appchain/subscriber/keeper/params.go +++ b/x/appchain/subscriber/keeper/params.go @@ -1,23 +1,52 @@ package keeper import ( + "time" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" sdk "github.com/cosmos/cosmos-sdk/types" ) -// SetParams sets the appchain coordinator parameters. +// SetSubscriberParams sets the appchain subscriber parameters. // The caller must ensure that the params are valid. -func (k Keeper) SetParams(ctx sdk.Context, params commontypes.SubscriberParams) { +func (k Keeper) SetSubscriberParams(ctx sdk.Context, params commontypes.SubscriberParams) { store := ctx.KVStore(k.storeKey) bz := k.cdc.MustMarshal(¶ms) store.Set(types.ParamsKey(), bz) } -// GetParams gets the appchain coordinator parameters. 
-func (k Keeper) GetParams(ctx sdk.Context) (res commontypes.SubscriberParams) { +// GetSubscriberParams gets the appchain subscriber parameters. +func (k Keeper) GetSubscriberParams(ctx sdk.Context) (res commontypes.SubscriberParams) { store := ctx.KVStore(k.storeKey) bz := store.Get(types.ParamsKey()) k.cdc.MustUnmarshal(bz, &res) return res } + +// SetCoordinatorFeePoolAddrStr sets the coordinator fee pool address string. +// This parameter is not necessarily provided at genesis and can be set later. +// This is because the genesis is generated by the coordinator chain based mostly +// on the registration transaction. +func (k Keeper) SetCoordinatorFeePoolAddrStr(ctx sdk.Context, addrStr string) { + params := k.GetSubscriberParams(ctx) + params.CoordinatorFeePoolAddrStr = addrStr + k.SetSubscriberParams(ctx, params) +} + +// GetDistributionTransmissionChannel gets the distribution transmission channel. +func (k Keeper) GetDistributionTransmissionChannel(ctx sdk.Context) string { + return k.GetSubscriberParams(ctx).DistributionTransmissionChannel +} + +// SetDistributionTransmissionChannel sets the distribution transmission channel. +func (k Keeper) SetDistributionTransmissionChannel(ctx sdk.Context, channel string) { + params := k.GetSubscriberParams(ctx) + params.DistributionTransmissionChannel = channel + k.SetSubscriberParams(ctx, params) +} + +// GetUnbondingPeriod gets the unbonding period. +func (k Keeper) GetUnbondingPeriod(ctx sdk.Context) time.Duration { + return k.GetSubscriberParams(ctx).UnbondingPeriod +} diff --git a/x/appchain/subscriber/keeper/queue.go b/x/appchain/subscriber/keeper/queue.go new file mode 100644 index 000000000..4aeea11f3 --- /dev/null +++ b/x/appchain/subscriber/keeper/queue.go @@ -0,0 +1,91 @@ +package keeper + +import ( + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// getAndIncrementPendingPacketsIdx returns the current pending packets index and increments it. +func (k Keeper) getAndIncrementPendingPacketsIdx(ctx sdk.Context) (toReturn uint64) { + store := ctx.KVStore(k.storeKey) + bz := store.Get(types.PendingPacketsIndexKey()) + if bz != nil { + toReturn = sdk.BigEndianToUint64(bz) + } + toStore := toReturn + 1 + store.Set(types.PendingPacketsIndexKey(), sdk.Uint64ToBigEndian(toStore)) + return toReturn +} + +// AppendPendingPacket appends a packet to the pending packets queue, indexed by the current index. +func (k Keeper) AppendPendingPacket( + ctx sdk.Context, + packetType commontypes.SubscriberPacketDataType, + packet commontypes.WrappedSubscriberPacketData, +) { + store := ctx.KVStore(k.storeKey) + // it is appended to a key with idx value, and not an overall array + idx := k.getAndIncrementPendingPacketsIdx(ctx) + key := types.PendingDataPacketsKey(idx) + wrapped := commontypes.NewSubscriberPacketData(packetType, packet) + bz := k.cdc.MustMarshal(&wrapped) + store.Set(key, bz) +} + +// GetPendingPackets returns ALL the pending packets from the store without indexes. 
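+// It is a thin wrapper around GetAllPendingPacketsWithIdx that strips the
+// queue indexes from the returned entries.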
+func (k Keeper) GetPendingPackets(ctx sdk.Context) []commontypes.SubscriberPacketData { + ppWithIndexes := k.GetAllPendingPacketsWithIdx(ctx) + ppList := make([]commontypes.SubscriberPacketData, 0) + for _, ppWithIndex := range ppWithIndexes { + ppList = append(ppList, ppWithIndex.SubscriberPacketData) + } + return ppList +} + +// GetAllPendingPacketsWithIdx returns ALL pending packet data from the store +// with indexes relevant to the pending packets queue. +func (k Keeper) GetAllPendingPacketsWithIdx(ctx sdk.Context) []types.SubscriberPacketDataWithIdx { + packets := []types.SubscriberPacketDataWithIdx{} + store := ctx.KVStore(k.storeKey) + // Note: PendingDataPacketsBytePrefix is the correct prefix, NOT PendingDataPacketsByteKey. + // See consistency with PendingDataPacketsKey(). + iterator := sdk.KVStorePrefixIterator(store, []byte{types.PendingDataPacketsBytePrefix}) + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + var packet commontypes.SubscriberPacketData + bz := iterator.Value() + k.cdc.MustUnmarshal(bz, &packet) + packetWithIdx := types.SubscriberPacketDataWithIdx{ + SubscriberPacketData: packet, + // index stored in key after prefix, see PendingDataPacketsKey() + Idx: sdk.BigEndianToUint64(iterator.Key()[1:]), + } + packets = append(packets, packetWithIdx) + } + return packets +} + +// DeletePendingDataPackets deletes pending data packets with given indexes +func (k Keeper) DeletePendingDataPackets(ctx sdk.Context, idxs ...uint64) { + store := ctx.KVStore(k.storeKey) + for _, idx := range idxs { + store.Delete(types.PendingDataPacketsKey(idx)) + } +} + +// DeleteAllPendingDataPackets deletes all pending data packets from the store. +func (k Keeper) DeleteAllPendingDataPackets(ctx sdk.Context) { + store := ctx.KVStore(k.storeKey) + // Note: PendingDataPacketsBytePrefix is the correct prefix, NOT PendingDataPacketsByteKey. + // See consistency with PendingDataPacketsKey(). 
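+	// each entry is stored under a key of the form prefix | big-endian(idx),
+	// so iterating over the prefix yields the packets in insertion order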
+ iterator := sdk.KVStorePrefixIterator(store, []byte{types.PendingDataPacketsBytePrefix}) + keysToDel := [][]byte{} + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + keysToDel = append(keysToDel, iterator.Key()) + } + for _, key := range keysToDel { + store.Delete(key) + } +} diff --git a/x/appchain/subscriber/keeper/queue_test.go b/x/appchain/subscriber/keeper/queue_test.go new file mode 100644 index 000000000..1286f04f5 --- /dev/null +++ b/x/appchain/subscriber/keeper/queue_test.go @@ -0,0 +1,85 @@ +package keeper_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ExocoreNetwork/exocore/testutil/keeper" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" +) + +func TestKeeper_PendingPackets(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + t.Run("AppendPendingPacket and GetPendingPackets", func(t *testing.T) { + packetType := commontypes.VscMaturedPacket + packet := commontypes.NewVscMaturedPacketData(1) + wrappedPacket := &commontypes.SubscriberPacketData_VscMaturedPacketData{VscMaturedPacketData: packet} + + // Append a packet + k.AppendPendingPacket( + ctx, packetType, + wrappedPacket, + ) + + // Get all pending packets + packets := k.GetPendingPackets(ctx) + require.Len(t, packets, 1) + require.Equal(t, packetType, packets[0].Type) + require.Equal(t, packet.ValsetUpdateID, wrappedPacket.VscMaturedPacketData.ValsetUpdateID) + }) + + t.Run("GetAllPendingPacketsWithIdx", func(t *testing.T) { + // Append two more packets + k.AppendPendingPacket( + ctx, commontypes.VscMaturedPacket, + &commontypes.SubscriberPacketData_VscMaturedPacketData{VscMaturedPacketData: commontypes.NewVscMaturedPacketData(2)}, + ) + k.AppendPendingPacket( + ctx, commontypes.VscMaturedPacket, + &commontypes.SubscriberPacketData_VscMaturedPacketData{VscMaturedPacketData: commontypes.NewVscMaturedPacketData(3)}, + ) + + packetsWithIdx := k.GetAllPendingPacketsWithIdx(ctx) + require.Len(t, packetsWithIdx, 3) + require.Equal(t, uint64(0), packetsWithIdx[0].Idx) + require.Equal(t, uint64(1), packetsWithIdx[1].Idx) + require.Equal(t, uint64(2), packetsWithIdx[2].Idx) + }) + + t.Run("DeletePendingDataPackets", func(t *testing.T) { + // Delete the second packet + k.DeletePendingDataPackets(ctx, 1) + + packetsWithIdx := k.GetAllPendingPacketsWithIdx(ctx) + require.Len(t, packetsWithIdx, 2) + require.Equal(t, uint64(0), packetsWithIdx[0].Idx) + require.Equal(t, uint64(2), packetsWithIdx[1].Idx) + }) + + t.Run("DeleteAllPendingDataPackets", func(t *testing.T) { + k.DeleteAllPendingDataPackets(ctx) + + packets := k.GetPendingPackets(ctx) + require.Empty(t, packets) + }) + + t.Run("getAndIncrementPendingPacketsIdx", func(t *testing.T) { + k.DeleteAllPendingDataPackets(ctx) + // This is an internal method, so we'll test it indirectly + for i := 0; i < 5; i++ { + k.AppendPendingPacket( + ctx, commontypes.VscMaturedPacket, + &commontypes.SubscriberPacketData_VscMaturedPacketData{VscMaturedPacketData: commontypes.NewVscMaturedPacketData(uint64(i))}, + ) + } + + packetsWithIdx := k.GetAllPendingPacketsWithIdx(ctx) + require.Len(t, packetsWithIdx, 5) + firstIdx := packetsWithIdx[0].Idx + for i, packet := range packetsWithIdx { + require.Equal(t, firstIdx+uint64(i), packet.Idx) + } + }) +} diff --git a/x/appchain/subscriber/keeper/relay.go b/x/appchain/subscriber/keeper/relay.go new file mode 100644 index 000000000..4116c4846 --- /dev/null +++ b/x/appchain/subscriber/keeper/relay.go @@ -0,0 +1,305 @@ +package keeper + +import ( + "fmt" + 
"strconv" + + errorsmod "cosmossdk.io/errors" + + "github.com/ExocoreNetwork/exocore/utils" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" + abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + "github.com/cosmos/ibc-go/v7/modules/core/exported" +) + +// OnRecvVSCPacket processes a validator set change packet +func (k Keeper) OnRecvVSCPacket( + ctx sdk.Context, + packet channeltypes.Packet, + data commontypes.ValidatorSetChangePacketData, +) exported.Acknowledgement { + coordinatorChannel, found := k.GetCoordinatorChannel(ctx) + if found && packet.SourceChannel != coordinatorChannel { + // should never happen + k.Logger(ctx).Error( + "received VSCPacket on non-coordinator channel", + "source channel", packet.SourceChannel, + "coordinator channel", coordinatorChannel, + ) + return nil + } + if !found { + // first message on channel + k.SetCoordinatorChannel(ctx, packet.SourceChannel) + k.Logger(ctx).Info( + "channel established", + "port", packet.DestinationPort, + "channel", packet.DestinationChannel, + ) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + commontypes.EventTypeChannelEstablished, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(channeltypes.AttributeKeyChannelID, packet.DestinationChannel), + sdk.NewAttribute(channeltypes.AttributeKeyPortID, packet.DestinationPort), + ), + ) + } + // the changes are received within blocks, but can only be forwarded to + // Tendermint during EndBlock. hence, get the changes received so far, append to them + // and save them back + currentChanges := k.GetPendingChanges(ctx) + pendingChanges := utils.AccumulateChanges( + currentChanges.ValidatorUpdates, + data.ValidatorUpdates, + ) + + k.SetPendingChanges(ctx, &commontypes.ValidatorSetChangePacketData{ + ValidatorUpdates: pendingChanges, + }) + + // Save maturity time and packet + maturityTime := ctx.BlockTime().Add(k.GetUnbondingPeriod(ctx)) + k.SetPacketMaturityTime(ctx, data.ValsetUpdateID, maturityTime) + k.Logger(ctx).Info( + "packet maturity time was set", + "vscID", data.ValsetUpdateID, + "maturity time (utc)", maturityTime.UTC(), + "maturity time (nano)", uint64(maturityTime.UnixNano()), + ) + + // set height to VSC id mapping; it is effective as of the next block + k.SetValsetUpdateIDForHeight( + ctx, ctx.BlockHeight()+1, data.ValsetUpdateID, + ) + k.Logger(ctx).Info( + "block height was mapped to vscID", + "height", ctx.BlockHeight()+1, + "vscID", data.ValsetUpdateID, + ) + + // remove outstanding slashing flags of the validators + // for which the slashing was acknowledged by the coordinator chain + // TODO(mm): since this packet is only received when there are validator set changes + // there is some additional lag between the slashing occurrence on the coordinator + // and deletion of this flag on the subscriber. does it matter? 
+ for _, consAddr := range data.GetSlashAcks() { + k.DeleteOutstandingDowntime(ctx, consAddr) + } + + k.Logger(ctx).Info( + "finished receiving/handling VSCPacket", + "vscID", data.ValsetUpdateID, + "len updates", len(data.ValidatorUpdates), + "len slash acks", len(data.SlashAcks), + ) + // Acknowledge the packet + return commontypes.NewResultAcknowledgementWithLog(ctx, commontypes.VscPacketHandledResult) +} + +// OnAcknowledgementPacket processes an acknowledgement packet +func (k Keeper) OnAcknowledgementPacket( + ctx sdk.Context, + packet channeltypes.Packet, + ack channeltypes.Acknowledgement, +) error { + // the ack can only be error when packet parsing is failed + // or the packet type is wrong, or when the slash packet + // has incorrect data. none of these should happen + if err := ack.GetError(); err != "" { + k.Logger(ctx).Error( + "recv ErrorAcknowledgement", + "channel", packet.SourceChannel, + "error", err, + ) + // Initiate ChanCloseInit using packet source (non-counterparty) port and channel + err := k.ChanCloseInit(ctx, packet.SourcePort, packet.SourceChannel) + if err != nil { + return fmt.Errorf("ChanCloseInit(%s) failed: %s", packet.SourceChannel, err.Error()) + } + // check if there is an established channel to coordinator + channelID, found := k.GetCoordinatorChannel(ctx) + if !found { + return errorsmod.Wrapf( + types.ErrNoProposerChannelID, + "recv ErrorAcknowledgement on non-established channel %s", + packet.SourceChannel, + ) + } + if channelID != packet.SourceChannel { + // Close the established channel as well + return k.ChanCloseInit(ctx, commontypes.SubscriberPortID, channelID) + } + } + return nil +} + +// QueueVscMaturedPackets queues all VSC packets that have matured as of the current block time, +// to be sent to the coordinator chain at the end of the block. +func (k Keeper) QueueVscMaturedPackets( + ctx sdk.Context, +) { + for _, packet := range k.GetElapsedVscPackets(ctx) { + vscPacket := commontypes.NewVscMaturedPacketData(packet.ValidatorSetChangeID) + k.AppendPendingPacket( + ctx, commontypes.VscMaturedPacket, + &commontypes.SubscriberPacketData_VscMaturedPacketData{ + VscMaturedPacketData: vscPacket, + }, + ) + k.DeletePacketMaturityTime(ctx, packet.ValidatorSetChangeID, packet.MaturityTime) + + k.Logger(ctx).Info("VSCMaturedPacket enqueued", "vscID", vscPacket.ValsetUpdateID) + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeVSCMatured, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(commontypes.AttributeChainID, ctx.ChainID()), + sdk.NewAttribute( + types.AttributeSubscriberHeight, + strconv.Itoa(int(ctx.BlockHeight())), + ), + sdk.NewAttribute( + commontypes.AttributeValSetUpdateID, + strconv.Itoa(int(packet.ValidatorSetChangeID)), + ), + sdk.NewAttribute(types.AttributeTimestamp, ctx.BlockTime().String()), + ), + ) + } +} + +// QueueSlashPacket queues a slashing request to be sent to the coordinator chain at the end of the block. 
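+// For downtime infractions, at most one request per validator is kept in
+// flight: the outstanding-downtime flag set here is only cleared once the
+// coordinator acknowledges the slashing via a ValidatorSetChangePacket
+// (see OnRecvVSCPacket). An illustrative call from a slashing hook, with the
+// infraction height assumed to be available to the caller, might look like:
+//
+//	k.QueueSlashPacket(
+//		ctx, validator,
+//		k.GetValsetUpdateIDForHeight(ctx, infractionHeight),
+//		stakingtypes.Infraction_INFRACTION_DOWNTIME,
+//	)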
+func (k Keeper) QueueSlashPacket( + ctx sdk.Context, + validator abci.Validator, + valsetUpdateID uint64, + infraction stakingtypes.Infraction, +) { + consAddr := sdk.ConsAddress(validator.Address) + downtime := infraction == stakingtypes.Infraction_INFRACTION_DOWNTIME + + // return if an outstanding downtime request is set for the validator + if downtime && k.HasOutstandingDowntime(ctx, consAddr) { + return + } + + if downtime { + // set outstanding downtime to not send multiple + // slashing requests for the same downtime infraction + k.SetOutstandingDowntime(ctx, consAddr) + } + + // construct slash packet data + slashPacket := commontypes.NewSlashPacketData(validator, valsetUpdateID, infraction) + + // append the Slash packet data to pending data packets + k.AppendPendingPacket( + ctx, + commontypes.SlashPacket, + &commontypes.SubscriberPacketData_SlashPacketData{ + SlashPacketData: slashPacket, + }, + ) + + k.Logger(ctx).Info( + "SlashPacket enqueued", + "vscID", slashPacket.ValsetUpdateID, + "validator cons addr", fmt.Sprintf("%X", slashPacket.Validator.Address), + "infraction", slashPacket.Infraction, + ) + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeSubscriberSlashRequest, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute( + commontypes.AttributeValidatorAddress, + fmt.Sprintf("%X", slashPacket.Validator.Address), + ), + sdk.NewAttribute( + commontypes.AttributeValSetUpdateID, + strconv.Itoa(int(valsetUpdateID)), + ), + sdk.NewAttribute(commontypes.AttributeInfractionType, infraction.String()), + ), + ) +} + +// IsChannelClosed returns a boolean whether a given channel is in the CLOSED state +func (k Keeper) IsChannelClosed(ctx sdk.Context, channelID string) bool { + channel, found := k.channelKeeper.GetChannel(ctx, commontypes.SubscriberPortID, channelID) + return !found || channel.State == channeltypes.CLOSED +} + +// SendPackets sends all pending packets to the coordinator chain +func (k Keeper) SendPackets(ctx sdk.Context) { + // find destination + channelID, ok := k.GetCoordinatorChannel(ctx) + if !ok { + return + } + // find packets, which will be returned sorted by index + pending := k.GetAllPendingPacketsWithIdx(ctx) + idxsForDeletion := []uint64{} + timeoutPeriod := k.GetSubscriberParams(ctx).IBCTimeoutPeriod + for i := range pending { + p := pending[i] + // Send packet over IBC + err := commontypes.SendIBCPacket( + ctx, + k.scopedKeeper, + k.channelKeeper, + channelID, // source channel id + commontypes.SubscriberPortID, // source port id + commontypes.ModuleCdc.MustMarshalJSON(&p.SubscriberPacketData), + timeoutPeriod, + ) + if err != nil { + if clienttypes.ErrClientNotActive.Is(err) { + // IBC client is expired! + // leave the packet data stored to be sent once the client is upgraded + k.Logger(ctx).Info( + "IBC client is expired, cannot send IBC packet; leaving packet data stored:", + "type", p.Type.String(), + ) + return + } + // Not able to send packet over IBC! + // Leave the packet data stored for the sent to be retried in the next block. + // Note that if VSCMaturedPackets are not sent for long enough, the coordinator + // will remove the subscriber anyway. 
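+			// returning here (rather than continuing) keeps this packet and all
+			// later ones queued, preserving their relative order for the retry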
+ k.Logger(ctx).Error( + "cannot send IBC packet; leaving packet data stored:", + "type", p.Type.String(), "err", err.Error(), + ) + return + } + if p.Type == commontypes.VscMaturedPacket { + id := p.GetVscMaturedPacketData().ValsetUpdateID + k.Logger(ctx).Info( + "IBC packet sent", + "type", p.Type.String(), + "id", id, + ) + } else { + data := p.GetSlashPacketData() + addr := data.Validator.Address + k.Logger(ctx).Info( + "IBC packet sent", + "type", p.Type.String(), + "addr", addr, + ) + } + // Otherwise the vsc matured will be deleted + idxsForDeletion = append(idxsForDeletion, p.Idx) + } + // Delete pending packets that were successfully sent and did not return an error from SendIBCPacket + k.DeletePendingDataPackets(ctx, idxsForDeletion...) +} diff --git a/x/appchain/subscriber/keeper/rewards.go b/x/appchain/subscriber/keeper/rewards.go new file mode 100644 index 000000000..5da9d030a --- /dev/null +++ b/x/appchain/subscriber/keeper/rewards.go @@ -0,0 +1,199 @@ +package keeper + +import ( + "strconv" + + "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" + sdk "github.com/cosmos/cosmos-sdk/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + clienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" +) + +// EndBlockSendRewards distributes the rewards minted / collected so far amongst the coordinator and the subscriber. +func (k Keeper) EndBlockSendRewards(ctx sdk.Context) { + k.SplitRewardsInternally(ctx) + if !k.ShouldSendRewardsToCoordinator(ctx) { + return + } + // Try to send rewards to coordinator + cachedCtx, writeCache := ctx.CacheContext() + if err := k.SendRewardsToCoordinator(cachedCtx); err != nil { + k.Logger(ctx).Error("attempt to sent rewards to coordinator failed", "error", err) + } else { + // write cache + writeCache() + } + + // Update LastRewardTransmissionHeight + k.SetLastRewardTransmissionHeight(ctx, ctx.BlockHeight()) +} + +// DistributeRewardsInternally "distributes" the rewards within the subscriber chain by earmarking the rewards for +// the coordinator in a separate account. +func (k Keeper) SplitRewardsInternally(ctx sdk.Context) { + // source address, the local fee pool + subscriberFeePoolAddr := k.accountKeeper.GetModuleAccount( + ctx, k.feeCollectorName, + ).GetAddress() + // get all tokens in the fee pool - we distribute them all but transfer + // only the reward denomination + fpTokens := k.bankKeeper.GetAllBalances(ctx, subscriberFeePoolAddr) + if fpTokens.Empty() { + k.Logger(ctx).Error("no tokens in fee pool") + return + } + // fraction + frac, err := sdk.NewDecFromStr(k.GetSubscriberParams(ctx).SubscriberRedistributionFraction) + if err != nil { + // should not happen since we validated this in the params + panic(err) + } + // multiply all tokens by fraction + decFPTokens := sdk.NewDecCoinsFromCoins(fpTokens...) + subsRedistrTokens, _ := decFPTokens.MulDec(frac).TruncateDecimal() + if subsRedistrTokens.Empty() { + k.Logger(ctx).Error("no tokens (fractional) to distribute") + // we can safely return from here since nothing has been distributed so far + // so there is nothing to revert + return + } + // send them from the fee pool to queue for local distribution + // TODO(mm): implement SubscriberRedistributeName local distribution logic + // on what basis? 
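+	// move the subscriber's share of the fees into the local redistribution
+	// module account; the remainder is earmarked for the coordinator below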
+ err = k.bankKeeper.SendCoinsFromModuleToModule(ctx, k.feeCollectorName, + types.SubscriberRedistributeName, subsRedistrTokens) + if err != nil { + // It is the common behavior in cosmos-sdk to panic if SendCoinsFromModuleToModule + // returns error. + panic(err) + } + // send the remaining tokens to the coordinator fee pool on the subscriber + remainingTokens := fpTokens.Sub(subsRedistrTokens...) + err = k.bankKeeper.SendCoinsFromModuleToModule(ctx, k.feeCollectorName, + types.SubscriberToSendToCoordinatorName, remainingTokens) + if err != nil { + // It is the common behavior in cosmos-sdk to panic if SendCoinsFromModuleToModule + // returns error. + panic(err) + } +} + +// Check whether it's time to send rewards to coordinator +func (k Keeper) ShouldSendRewardsToCoordinator(ctx sdk.Context) bool { + bpdt := k.GetSubscriberParams(ctx).BlocksPerDistributionTransmission + curHeight := ctx.BlockHeight() + ltbh := k.GetLastRewardTransmissionHeight(ctx) + diff := curHeight - ltbh + shouldSend := diff >= bpdt + return shouldSend +} + +// GetLastRewardTransmissionHeight returns the height of the last reward transmission +func (k Keeper) GetLastRewardTransmissionHeight( + ctx sdk.Context, +) int64 { + store := ctx.KVStore(k.storeKey) + bz := store.Get(types.LastRewardTransmissionHeightKey()) + return int64(sdk.BigEndianToUint64(bz)) +} + +// SetLastRewardTransmissionHeight sets the height of the last reward transmission +func (k Keeper) SetLastRewardTransmissionHeight( + ctx sdk.Context, + height int64, +) { + store := ctx.KVStore(k.storeKey) + store.Set( + types.LastRewardTransmissionHeightKey(), + sdk.Uint64ToBigEndian(uint64(height)), + ) +} + +// SendRewardsToCoordinator attempts to send to the coordinator (via IBC) +// all the block rewards allocated for the coordinator +func (k Keeper) SendRewardsToCoordinator(ctx sdk.Context) error { + sourceChannelID := k.GetDistributionTransmissionChannel(ctx) + transferChannel, found := k.channelKeeper.GetChannel( + ctx, transfertypes.PortID, sourceChannelID, + ) + if !found || transferChannel.State != channeltypes.OPEN { + k.Logger(ctx).Error("WARNING: cannot send rewards to coordinator;", + "transmission channel not in OPEN state", "channelID", sourceChannelID) + return nil + } + // due to timing it may happen that the channel is in TRYOPEN state + // on the counterparty, and in that case the transfer will fail. + // this is mitigated by having a sufficiently large reward distribution time + // and setting up the appchain-1 channel before the first distribution takes place. + // for localnet, i am using a value of 10 which is a bit low, but the subsequent + // distributions will be fine. another option is to create the channel immediately + // after a distribution is queued. that way, the channel will be open by the time + // of the next distribution. 
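+	// note that returning nil above (instead of an error) simply defers the
+	// transfer: the caller still records this height as the last transmission,
+	// so the next attempt happens after BlocksPerDistributionTransmission blocks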
+ + // get params for sending rewards + params := k.GetSubscriberParams(ctx) + toSendToCoordinatorAddr := k.accountKeeper.GetModuleAccount(ctx, + types.SubscriberToSendToCoordinatorName).GetAddress() // sender address + coordinatorAddr := params.CoordinatorFeePoolAddrStr // receiver address + timeoutHeight := clienttypes.ZeroHeight() + timeoutTimestamp := uint64(ctx.BlockTime().Add(params.IBCTimeoutPeriod).UnixNano()) + + denom := params.RewardDenom + balance := k.bankKeeper.GetBalance(ctx, toSendToCoordinatorAddr, denom) + + // if the balance is not zero, + if !balance.IsZero() { + packetTransfer := &transfertypes.MsgTransfer{ + SourcePort: transfertypes.PortID, + SourceChannel: sourceChannelID, + Token: balance, + Sender: toSendToCoordinatorAddr.String(), // subscriber address to send from + Receiver: coordinatorAddr, // coordinator fee pool address to send to + TimeoutHeight: timeoutHeight, // timeout height disabled + TimeoutTimestamp: timeoutTimestamp, + Memo: "subscriber chain rewards distribution", + } + // validate MsgTransfer before calling Transfer() + err := packetTransfer.ValidateBasic() + if err != nil { + return err + } + _, err = k.ibcTransferKeeper.Transfer(ctx, packetTransfer) + if err != nil { + return err + } + } else { + k.Logger(ctx).Error("cannot send rewards to coordinator", + "balance is zero", "denom", denom) + return nil + } + + k.Logger(ctx).Error("sent block rewards to coordinator", + "amount", balance.String(), + "denom", denom, + ) + currentHeight := ctx.BlockHeight() + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeFeeDistribution, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute( + types.AttributeDistributionCurrentHeight, + strconv.Itoa(int(currentHeight)), + ), + sdk.NewAttribute( + types.AttributeDistributionNextHeight, + strconv.Itoa(int(currentHeight+params.BlocksPerDistributionTransmission)), + ), + sdk.NewAttribute( + types.AttributeDistributionFraction, + params.SubscriberRedistributionFraction, + ), + sdk.NewAttribute(types.AttributeDistributionValue, balance.String()), + sdk.NewAttribute(types.AttributeDistributionDenom, denom), + ), + ) + + return nil +} diff --git a/x/appchain/subscriber/keeper/rewards_test.go b/x/appchain/subscriber/keeper/rewards_test.go new file mode 100644 index 000000000..ec9b2710f --- /dev/null +++ b/x/appchain/subscriber/keeper/rewards_test.go @@ -0,0 +1,181 @@ +package keeper_test + +import ( + "testing" + + testutilkeeper "github.com/ExocoreNetwork/exocore/testutil/keeper" + testutiltx "github.com/ExocoreNetwork/exocore/testutil/tx" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" + cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/address" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestEndBlockSendRewards(t *testing.T) { + keeper, ctx, mocks := testutilkeeper.NewSubscriberKeeper(t) + + // Set up expectations + mocks.BankKeeper.EXPECT().GetAllBalances(gomock.Any(), gomock.Any()).Return(sdk.NewCoins(sdk.NewCoin("stake", sdk.NewInt(1000)))) + mocks.AccountKeeper.EXPECT().GetModuleAccount(gomock.Any(), 
gomock.Any()).Return(&mockModuleAccount{sdk.AccAddress(address.Module("fee_collector", []byte{}))}).Times(2) + mocks.BankKeeper.EXPECT().SendCoinsFromModuleToModule(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2) + + // Set up params + params := commontypes.DefaultSubscriberParams() + params.BlocksPerDistributionTransmission = 100 + params.CoordinatorFeePoolAddrStr = sdk.AccAddress(testutiltx.GenerateAddress().Bytes()).String() + keeper.SetSubscriberParams(ctx, params) + + // Test when it's not time to send rewards + keeper.SetLastRewardTransmissionHeight(ctx, ctx.BlockHeight()) + keeper.EndBlockSendRewards(ctx) + + // Test when it's time to send rewards + keeper.SetDistributionTransmissionChannel(ctx, "channel-0") + keeper.SetLastRewardTransmissionHeight(ctx, ctx.BlockHeight()-100) + require.True(t, keeper.ShouldSendRewardsToCoordinator(ctx)) + + // Set up expectations + mocks.BankKeeper.EXPECT().GetAllBalances(gomock.Any(), gomock.Any()).Return(sdk.NewCoins(sdk.NewCoin("stake", sdk.NewInt(1000)))) + mocks.BankKeeper.EXPECT().SendCoinsFromModuleToModule(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(2) + mocks.AccountKeeper.EXPECT().GetModuleAccount(gomock.Any(), gomock.Any()).Return(&mockModuleAccount{sdk.AccAddress(address.Module("fee_collector", []byte{}))}) + mocks.ChannelKeeper.EXPECT().GetChannel(gomock.Any(), gomock.Any(), gomock.Any()).Return(channeltypes.Channel{State: channeltypes.OPEN}, true) + mocks.BankKeeper.EXPECT().GetBalance(gomock.Any(), gomock.Any(), gomock.Any()).Return(sdk.NewCoin("stake", sdk.NewInt(200))) + mocks.IBCTransferKeeper.EXPECT().Transfer(gomock.Any(), gomock.Any()).Return(nil, nil) + keeper.EndBlockSendRewards(ctx) + + // Verify that LastRewardTransmissionHeight was updated + require.Equal(t, ctx.BlockHeight(), keeper.GetLastRewardTransmissionHeight(ctx)) +} + +func TestSplitRewardsInternally(t *testing.T) { + keeper, ctx, mocks := testutilkeeper.NewSubscriberKeeper(t) + + // Set up expectations + feePoolAddr := sdk.AccAddress(address.Module("fee_collector", []byte{})) + mocks.AccountKeeper.EXPECT().GetModuleAccount(gomock.Any(), "fee_collector").Return(&mockModuleAccount{sdk.AccAddress(feePoolAddr)}) + + initialBalance := sdk.NewCoins(sdk.NewCoin("stake", sdk.NewInt(1000))) + mocks.BankKeeper.EXPECT().GetAllBalances(gomock.Any(), feePoolAddr).Return(initialBalance) + + // Set up params + params := commontypes.DefaultSubscriberParams() + params.SubscriberRedistributionFraction = "0.3" + keeper.SetSubscriberParams(ctx, params) + + // Expect two transfers + mocks.BankKeeper.EXPECT().SendCoinsFromModuleToModule(gomock.Any(), "fee_collector", types.SubscriberRedistributeName, gomock.Any()).Return(nil) + mocks.BankKeeper.EXPECT().SendCoinsFromModuleToModule(gomock.Any(), "fee_collector", types.SubscriberToSendToCoordinatorName, gomock.Any()).Return(nil) + + keeper.SplitRewardsInternally(ctx) +} + +func TestShouldSendRewardsToCoordinator(t *testing.T) { + keeper, ctx, _ := testutilkeeper.NewSubscriberKeeper(t) + + // Set up params + params := commontypes.DefaultSubscriberParams() + params.BlocksPerDistributionTransmission = 100 + keeper.SetSubscriberParams(ctx, params) + + // Test when it's not time to send rewards + keeper.SetLastRewardTransmissionHeight(ctx, ctx.BlockHeight()-99) + require.False(t, keeper.ShouldSendRewardsToCoordinator(ctx)) + + // Test when it's time to send rewards + keeper.SetLastRewardTransmissionHeight(ctx, ctx.BlockHeight()-100) + require.True(t, 
keeper.ShouldSendRewardsToCoordinator(ctx)) +} + +func TestSendRewardsToCoordinator(t *testing.T) { + keeper, ctx, mocks := testutilkeeper.NewSubscriberKeeper(t) + + // Set up expectations + mocks.ChannelKeeper.EXPECT().GetChannel(gomock.Any(), transfertypes.PortID, gomock.Any()).Return(channeltypes.Channel{State: channeltypes.OPEN}, true).Times(2) + + toSendAddr := sdk.AccAddress(address.Module(types.SubscriberToSendToCoordinatorName, []byte{})) + mocks.AccountKeeper.EXPECT().GetModuleAccount(gomock.Any(), types.SubscriberToSendToCoordinatorName).Return(&mockModuleAccount{sdk.AccAddress(toSendAddr)}).Times(2) + + // Set up params + params := commontypes.DefaultSubscriberParams() + params.RewardDenom = "stake" + params.CoordinatorFeePoolAddrStr = sdk.AccAddress(testutiltx.GenerateAddress().Bytes()).String() + keeper.SetSubscriberParams(ctx, params) + + keeper.SetDistributionTransmissionChannel(ctx, "channel-0") + + // Test when balance is zero + mocks.BankKeeper.EXPECT().GetBalance(gomock.Any(), toSendAddr, "stake").Return(sdk.NewCoin("stake", sdk.ZeroInt())) + err := keeper.SendRewardsToCoordinator(ctx) + require.NoError(t, err) + + // Test when balance is non-zero + mocks.BankKeeper.EXPECT().GetBalance(gomock.Any(), toSendAddr, "stake").Return(sdk.NewCoin("stake", sdk.NewInt(1000))) + mocks.IBCTransferKeeper.EXPECT().Transfer(gomock.Any(), gomock.Any()).Return(nil, nil) + err = keeper.SendRewardsToCoordinator(ctx) + require.NoError(t, err) +} + +type mockModuleAccount struct { + sdk.AccAddress +} + +func (m *mockModuleAccount) GetAddress() sdk.AccAddress { + return m.AccAddress +} + +func (m *mockModuleAccount) SetAddress(addr sdk.AccAddress) error { + m.AccAddress = addr + return nil +} + +func (m *mockModuleAccount) GetPubKey() cryptotypes.PubKey { + return nil +} + +func (m *mockModuleAccount) SetPubKey(pubKey cryptotypes.PubKey) error { + return nil +} + +func (m *mockModuleAccount) GetAccountNumber() uint64 { + return 0 +} + +func (m *mockModuleAccount) SetAccountNumber(num uint64) error { + return nil +} + +func (m *mockModuleAccount) GetSequence() uint64 { + return 0 +} + +func (m *mockModuleAccount) SetSequence(seq uint64) error { + return nil +} + +func (m *mockModuleAccount) GetName() string { + return "mock" +} + +func (m *mockModuleAccount) GetPermissions() []string { + return []string{} +} + +func (m *mockModuleAccount) HasPermission(string) bool { + return false +} + +func (m *mockModuleAccount) String() string { + return "mock" +} + +func (m *mockModuleAccount) ProtoMessage() {} + +func (m *mockModuleAccount) Reset() {} + +var _ authtypes.ModuleAccountI = &mockModuleAccount{} diff --git a/x/appchain/subscriber/keeper/validators.go b/x/appchain/subscriber/keeper/validators.go new file mode 100644 index 000000000..b6482db16 --- /dev/null +++ b/x/appchain/subscriber/keeper/validators.go @@ -0,0 +1,251 @@ +package keeper + +import ( + keytypes "github.com/ExocoreNetwork/exocore/types/keys" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + types "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" + abci "github.com/cometbft/cometbft/abci/types" + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" +) + +// SetValsetUpdateIDForHeight sets the valset update ID for a given height +func (k Keeper) SetValsetUpdateIDForHeight( + ctx sdk.Context, height int64, valsetUpdateID uint64, +) { + store := ctx.KVStore(k.storeKey) + store.Set(types.ValsetUpdateIDKey(height), 
sdk.Uint64ToBigEndian(valsetUpdateID)) +} + +// GetValsetUpdateIDForHeight gets the valset update ID for a given height +func (k Keeper) GetValsetUpdateIDForHeight( + ctx sdk.Context, height int64, +) uint64 { + store := ctx.KVStore(k.storeKey) + key := types.ValsetUpdateIDKey(height) + if !store.Has(key) { + return 0 + } + bz := store.Get(key) + return sdk.BigEndianToUint64(bz) +} + +// ApplyValidatorChanges is a wrapper function that returns the provided validator set +// update. The wrapping allows to save the validator set information in the store. +// The caller should (but _not_ must) provide `changes` that are different from the +// ones already with Tendermint. +func (k Keeper) ApplyValidatorChanges( + ctx sdk.Context, + // in dogfood, we use the wrappedkeywithpower because the operator module provides + // keys in that format. since the subscriber chain doesn't need the operator module + // we can use the tm validator update type. + changes []abci.ValidatorUpdate, +) []abci.ValidatorUpdate { + ret := make([]abci.ValidatorUpdate, 0, len(changes)) + logger := k.Logger(ctx) + for i := range changes { + change := changes[i] // avoid implicit memory aliasing + wrappedKey := keytypes.NewWrappedConsKeyFromTmProtoKey(&change.PubKey) + if wrappedKey == nil { + // an error in deserializing the key would indicate that the coordinator + // has provided invalid data. this is a critical error and should be + // investigated. + logger.Error( + "failed to deserialize validator key", + "i", i, "validator", change.PubKey, + ) + continue + } + consAddress := wrappedKey.ToConsAddr() + val, found := k.GetSubscriberValidator(ctx, consAddress) + switch found { + case true: + if change.Power < 1 { + logger.Info("deleting validator", "consAddress", consAddress) + k.DeleteSubscriberValidator(ctx, consAddress) + } else { + logger.Info("updating validator", "consAddress", consAddress) + val.Power = change.Power + k.SetSubscriberValidator(ctx, val) + } + case false: + if change.Power > 0 { + ocVal, err := commontypes.NewSubscriberValidator( + consAddress, change.Power, wrappedKey.ToSdkKey(), + ) + if err != nil { + // cannot happen, but just in case add this check. + // simply skip the validator if it does. + logger.Error( + "failed to instantiate validator", + "i", i, "validator", change.PubKey, + ) + continue + } + logger.Info("adding validator", "consAddress", consAddress) + k.SetSubscriberValidator(ctx, ocVal) + } else { + // edge case: we received an update for 0 power + // but the validator is already deleted. Do not forward + // to tendermint. + logger.Info( + "received update for non-existent validator", + "cons address", consAddress, + ) + continue + } + } + ret = append(ret, change) + } + return ret +} + +// SetSubscriberValidator stores a validator based on the pub key derived address. +func (k Keeper) SetSubscriberValidator( + ctx sdk.Context, validator commontypes.SubscriberValidator, +) { + store := ctx.KVStore(k.storeKey) + bz := k.cdc.MustMarshal(&validator) + + store.Set(types.SubscriberValidatorKey(validator.ConsAddress), bz) +} + +// GetSubscriberValidator gets a validator based on the pub key derived (consensus) address. 
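+// The boolean return value reports whether a record exists for the given
+// consensus address; records are written by SetSubscriberValidator and
+// removed by DeleteSubscriberValidator.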
+func (k Keeper) GetSubscriberValidator( + ctx sdk.Context, addr sdk.ConsAddress, +) (validator commontypes.SubscriberValidator, found bool) { + store := ctx.KVStore(k.storeKey) + v := store.Get(types.SubscriberValidatorKey(addr)) + if v == nil { + return + } + k.cdc.MustUnmarshal(v, &validator) + found = true + + return +} + +// DeleteSubscriberValidator deletes a validator based on the pub key derived address. +func (k Keeper) DeleteSubscriberValidator(ctx sdk.Context, addr sdk.ConsAddress) { + store := ctx.KVStore(k.storeKey) + key := types.SubscriberValidatorKey(addr) + if store.Has(key) { + store.Delete(key) + } else { + k.Logger(ctx).Info("validator not found", "address", addr) + } +} + +// GetAllSubscriberValidators returns all validators in the store. +func (k Keeper) GetAllSubscriberValidators( + ctx sdk.Context, +) (validators []commontypes.SubscriberValidator) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte{types.SubscriberValidatorBytePrefix}) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + val := commontypes.SubscriberValidator{} + k.cdc.MustUnmarshal(iterator.Value(), &val) + validators = append(validators, val) + } + + return validators +} + +// GetHistoricalInfo gets the historical info at a given height +func (k Keeper) GetHistoricalInfo( + ctx sdk.Context, + height int64, +) (stakingtypes.HistoricalInfo, bool) { + store := ctx.KVStore(k.storeKey) + key := types.HistoricalInfoKey(height) + + value := store.Get(key) + if value == nil { + return stakingtypes.HistoricalInfo{}, false + } + + return stakingtypes.MustUnmarshalHistoricalInfo(k.cdc, value), true +} + +// SetHistoricalInfo sets the historical info at a given height +func (k Keeper) SetHistoricalInfo( + ctx sdk.Context, + height int64, + hi *stakingtypes.HistoricalInfo, +) { + store := ctx.KVStore(k.storeKey) + key := types.HistoricalInfoKey(height) + value := k.cdc.MustMarshal(hi) + + store.Set(key, value) +} + +// DeleteHistoricalInfo deletes the historical info at a given height +func (k Keeper) DeleteHistoricalInfo(ctx sdk.Context, height int64) { + store := ctx.KVStore(k.storeKey) + key := types.HistoricalInfoKey(height) + + store.Delete(key) +} + +// TrackHistoricalInfo saves the latest historical-info and deletes the oldest +// heights that are below pruning height +func (k Keeper) TrackHistoricalInfo(ctx sdk.Context) { + numHistoricalEntries := int64(k.GetParams(ctx).HistoricalEntries) + + // Prune store to ensure we only have parameter-defined historical entries. + // In most cases, this will involve removing a single historical entry. + // In the rare scenario when the historical entries gets reduced to a lower value k' + // from the original value k. k - k' entries must be deleted from the store. + // Since the entries to be deleted are always in a continuous range, we can iterate + // over the historical entries starting from the most recent version to be pruned + // and then return at the first empty entry. 
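+	// for example, with numHistoricalEntries = 3 at height 10, the loop starts
+	// at height 7 and walks downwards, deleting entries until it hits a height
+	// that has none stored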
+ for i := ctx.BlockHeight() - numHistoricalEntries; i >= 0; i-- { + _, found := k.GetHistoricalInfo(ctx, i) + if found { + k.DeleteHistoricalInfo(ctx, i) + } else { + break + } + } + + // if there is no need to persist historicalInfo, return + if numHistoricalEntries == 0 { + return + } + + // Create HistoricalInfo struct + lastVals := []stakingtypes.Validator{} + for _, v := range k.GetAllSubscriberValidators(ctx) { + pk, err := v.ConsPubKey() + if err != nil { + // This should never happen as the pubkey is assumed + // to be stored correctly in ApplyCCValidatorChanges. + panic(err) + } + val, err := stakingtypes.NewValidator(nil, pk, stakingtypes.Description{}) + if err != nil { + // This should never happen as the pubkey is assumed + // to be stored correctly in ApplyCCValidatorChanges. + panic(err) + } + + // Set validator to bonded status + val.Status = stakingtypes.Bonded + // Compute tokens from voting power + val.Tokens = sdk.TokensFromConsensusPower( + v.Power, sdk.DefaultPowerReduction, + ) + lastVals = append(lastVals, val) + } + + // Create historical info entry which sorts the validator set by voting power + historicalEntry := stakingtypes.NewHistoricalInfo( + ctx.BlockHeader(), lastVals, sdk.DefaultPowerReduction, + ) + + // Set latest HistoricalInfo at current height + k.SetHistoricalInfo(ctx, ctx.BlockHeight(), &historicalEntry) +} diff --git a/x/appchain/subscriber/keeper/validators_test.go b/x/appchain/subscriber/keeper/validators_test.go new file mode 100644 index 000000000..8f54780f2 --- /dev/null +++ b/x/appchain/subscriber/keeper/validators_test.go @@ -0,0 +1,94 @@ +package keeper_test + +import ( + "testing" + + "github.com/ExocoreNetwork/exocore/testutil/keeper" + testutiltx "github.com/ExocoreNetwork/exocore/testutil/tx" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/stretchr/testify/require" +) + +func TestKeeper_ValsetUpdateID(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + t.Run("Set and Get ValsetUpdateID", func(t *testing.T) { + height := int64(100) + expectedID := uint64(12345) + + k.SetValsetUpdateIDForHeight(ctx, height, expectedID) + actualID := k.GetValsetUpdateIDForHeight(ctx, height) + + require.Equal(t, expectedID, actualID) + }) + + t.Run("Get non-existent ValsetUpdateID", func(t *testing.T) { + height := int64(200) + actualID := k.GetValsetUpdateIDForHeight(ctx, height) + + require.Equal(t, uint64(0), actualID) + }) +} + +func TestKeeper_SubscriberValidator(t *testing.T) { + k, ctx, _ := keeper.NewSubscriberKeeper(t) + + t.Run("Set, Get, and Delete SubscriberValidator", func(t *testing.T) { + consAddr := testutiltx.GenerateConsAddress() + pubKey := testutiltx.GenerateConsensusKey() + power := int64(1000) + + validator, err := commontypes.NewSubscriberValidator(consAddr, power, pubKey.ToSdkKey()) + require.NoError(t, err) + + // Set validator + k.SetSubscriberValidator(ctx, validator) + + // Get validator + gotValidator, found := k.GetSubscriberValidator(ctx, consAddr) + require.True(t, found) + require.Equal(t, validator, gotValidator) + + // Delete validator + k.DeleteSubscriberValidator(ctx, consAddr) + + // Try to get deleted validator + _, found = k.GetSubscriberValidator(ctx, consAddr) + require.False(t, found) + }) + + t.Run("Delete non-existent SubscriberValidator", func(t *testing.T) { + consAddr := testutiltx.GenerateConsAddress() + + // This should not panic + k.DeleteSubscriberValidator(ctx, consAddr) + }) +} + +func TestKeeper_GetAllSubscriberValidators(t *testing.T) { + k, ctx, _ 
:= keeper.NewSubscriberKeeper(t) + + t.Run("Get all SubscriberValidators", func(t *testing.T) { + // Create and set multiple validators + validators := []commontypes.SubscriberValidator{} + for i := 0; i < 5; i++ { + consAddr := testutiltx.GenerateConsAddress() + pubKey := testutiltx.GenerateConsensusKey() + power := int64(1000 + i) + + validator, err := commontypes.NewSubscriberValidator(consAddr, power, pubKey.ToSdkKey()) + require.NoError(t, err) + + k.SetSubscriberValidator(ctx, validator) + validators = append(validators, validator) + } + + // Get all validators + gotValidators := k.GetAllSubscriberValidators(ctx) + + require.Equal(t, len(validators), len(gotValidators)) + for _, validator := range validators { + require.Contains(t, gotValidators, validator) + } + }) +} diff --git a/x/appchain/subscriber/module.go b/x/appchain/subscriber/module.go index ede8b7a0f..d2c6a51a2 100644 --- a/x/appchain/subscriber/module.go +++ b/x/appchain/subscriber/module.go @@ -1,4 +1,4 @@ -package dogfood +package subscriber import ( "context" @@ -163,10 +163,50 @@ func (AppModule) ConsensusVersion() uint64 { return 1 } // BeginBlock contains the logic that is automatically triggered at the beginning of each block func (am AppModule) BeginBlock(ctx sdk.Context, _ abci.RequestBeginBlock) { - am.keeper.BeginBlock(ctx) + channelID, found := am.keeper.GetCoordinatorChannel(ctx) + if found && am.keeper.IsChannelClosed(ctx, channelID) { + // we are now PoA + am.keeper.Logger(ctx). + Error("coordinator channel is closed, we are now PoA", "channelId", channelID) + } + + // get height of the yet-to-be-made block + height := ctx.BlockHeight() + // this should either be the last known vscId + // or the one set by the last processed vsc packet + // since that processing applies to the next block + vscID := am.keeper.GetValsetUpdateIDForHeight(ctx, height) + am.keeper.SetValsetUpdateIDForHeight(ctx, height+1, vscID) + am.keeper.Logger(ctx).Debug( + "block height was mapped to vscID", + "height", height, "vscID", vscID, + ) + + am.keeper.TrackHistoricalInfo(ctx) } // EndBlock contains the logic that is automatically triggered at the end of each block func (am AppModule) EndBlock(ctx sdk.Context, _ abci.RequestEndBlock) []abci.ValidatorUpdate { - return am.keeper.EndBlock(ctx) + // send rewards to coordinator + am.keeper.EndBlockSendRewards(ctx) + + // queue maturity packets to coordinator + am.keeper.QueueVscMaturedPackets(ctx) + // remember that slash packets are queued in the subscriber module + // by the slashing and evidence modules when a slashing event is observed by them + + // broadcast queued packets to coordinator + am.keeper.SendPackets(ctx) + + // apply validator changes and then delete them + data := am.keeper.GetPendingChanges(ctx) + if len(data.ValidatorUpdates) == 0 { + return []abci.ValidatorUpdate{} + } + updates := am.keeper.ApplyValidatorChanges(ctx, data.ValidatorUpdates) + am.keeper.DeletePendingChanges(ctx) + if len(updates) > 0 { + am.keeper.Logger(ctx).Info("applying validator updates", "updates", updates) + } + return updates } diff --git a/x/appchain/subscriber/module_ibc.go b/x/appchain/subscriber/module_ibc.go new file mode 100644 index 000000000..edb8a30c4 --- /dev/null +++ b/x/appchain/subscriber/module_ibc.go @@ -0,0 +1,375 @@ +package subscriber + +import ( + "fmt" + "strings" + + errorsmod "cosmossdk.io/errors" + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" + "github.com/ExocoreNetwork/exocore/x/appchain/subscriber/keeper" + 
"github.com/ExocoreNetwork/exocore/x/appchain/subscriber/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + transfertypes "github.com/cosmos/ibc-go/v7/modules/apps/transfer/types" + channeltypes "github.com/cosmos/ibc-go/v7/modules/core/04-channel/types" + porttypes "github.com/cosmos/ibc-go/v7/modules/core/05-port/types" + host "github.com/cosmos/ibc-go/v7/modules/core/24-host" + ibcexported "github.com/cosmos/ibc-go/v7/modules/core/exported" +) + +// IBCModule is the IBC module for the subscriber module. +type IBCModule struct { + keeper keeper.Keeper +} + +// interface guard +var _ porttypes.IBCModule = IBCModule{} + +// NewIBCModule creates a new IBCModule instance +func NewIBCModule(k keeper.Keeper) IBCModule { + return IBCModule{ + keeper: k, + } +} + +// OnChanOpenInit implements the IBCModule interface for the subscriber module. +// The function is called when the channel is created, typically by the relayer, +// which must be informed that the channel should be created on this chain. +// Starting the channel on the coordinator chain is not supported. +func (im IBCModule) OnChanOpenInit( + ctx sdk.Context, + order channeltypes.Order, + connectionHops []string, + portID string, + channelID string, + chanCap *capabilitytypes.Capability, + counterparty channeltypes.Counterparty, + version string, +) (string, error) { + im.keeper.Logger(ctx).Debug( + "OnChanOpenInit", + ) + + // ICS26 requires that it is set to the default version if empty + if strings.TrimSpace(version) == "" { + version = commontypes.Version + } + // check if channel has already been initialized + if storedChannel, ok := im.keeper.GetCoordinatorChannel(ctx); ok { + return "", errorsmod.Wrapf(commontypes.ErrDuplicateChannel, + "channel already exists with ID %s", storedChannel) + } + + // check channel params (subscriber end) + if order != channeltypes.ORDERED { + return "", errorsmod.Wrapf( + channeltypes.ErrInvalidChannelOrdering, + "expected %s channel, got %s ", + channeltypes.ORDERED, + order, + ) + } + // we set our port at genesis. check that the port of the channel is the same + if boundPort := im.keeper.GetPort(ctx); portID != boundPort { + return "", errorsmod.Wrapf( + porttypes.ErrInvalidPort, + "invalid port ID: %s, expected: %s", + portID, boundPort, + ) + } + // check that the version is correct + if version != commontypes.Version { + return "", errorsmod.Wrapf( + commontypes.ErrInvalidVersion, + "invalid version: %s, expected: %s", + version, commontypes.Version, + ) + } + // check channel params (coordinator end) + if counterparty.PortId != commontypes.CoordinatorPortID { + return "", errorsmod.Wrapf( + porttypes.ErrInvalidPort, + "invalid counterparty port ID: %s, expected: %s", + counterparty.PortId, commontypes.CoordinatorPortID, + ) + } + + // claim channel capability passed back by IBC module + if err := im.keeper.ClaimCapability( + ctx, chanCap, + host.ChannelCapabilityPath(portID, channelID), + ); err != nil { + return "", err + } + + // check connection hops, connection, and the client id (set on genesis) + if err := im.keeper.VerifyCoordinatorChain(ctx, connectionHops); err != nil { + return "", err + } + + return commontypes.Version, nil +} + +// OnChanOpenTry implements the IBCModule interface. It rejects attempts by +// the counterparty chain to open a channel here, since our spec requires +// that the channel is opened by this chain. 
+func (im IBCModule) OnChanOpenTry( + ctx sdk.Context, + _ channeltypes.Order, + _ []string, + _ string, + _ string, + _ *capabilitytypes.Capability, + _ channeltypes.Counterparty, + _ string, +) (string, error) { + im.keeper.Logger(ctx).Debug( + "OnChanOpenTry", + ) + return "", errorsmod.Wrap( + commontypes.ErrInvalidChannelFlow, + "channel handshake must be initiated by subscriber chain", + ) +} + +// OnChanOpenAck implements the IBCModule interface. It is ran after `OnChanOpenTry` +// is run on the counterparty chain. +func (im IBCModule) OnChanOpenAck( + ctx sdk.Context, + portID string, + channelID string, + _ string, // unused as per spec + counterpartyMetadata string, +) error { + im.keeper.Logger(ctx).Debug( + "OnChanOpenAck", + ) + + // ensure coordinator channel has not already been created + if coordinatorChannel, ok := im.keeper.GetCoordinatorChannel(ctx); ok { + return errorsmod.Wrapf(commontypes.ErrDuplicateChannel, + "coordinator channel: %s already established", coordinatorChannel) + } + + var md commontypes.HandshakeMetadata + // Marshal by coordinator and Unmarshal by subscriber. Don't use `must`, + // because we don't trust the source since it wasn't generated here. + if err := (&md).Unmarshal([]byte(counterpartyMetadata)); err != nil { + return errorsmod.Wrapf( + commontypes.ErrInvalidHandshakeMetadata, + "error unmarshalling ibc-ack metadata: \n%v", err, + ) + } + + if md.Version != commontypes.Version { + return errorsmod.Wrapf( + commontypes.ErrInvalidVersion, + "invalid counterparty version: %s, expected %s", + md.Version, + commontypes.Version, + ) + } + + // This address is not required to be supplied at the time of chain registration. + // Rather, it is set later by the coordinator chain. + im.keeper.SetCoordinatorFeePoolAddrStr(ctx, md.CoordinatorFeePoolAddr) + + /////////////////////////////////////////////////// + // Initialize distribution token transfer channel + + // First check if an existing transfer channel already exists. + transChannelID := im.keeper.GetDistributionTransmissionChannel(ctx) + if found := im.keeper.TransferChannelExists(ctx, transChannelID); found { + return nil + } + + // NOTE The handshake for this channel is handled by the ibc-go/transfer + // module. If the transfer-channel fails here (unlikely) then the transfer + // channel should be manually created and parameters set accordingly. + + // reuse the connection hops for this channel for the + // transfer channel being created. 
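+	// reusing the hops guarantees that the transfer channel terminates on the
+	// same coordinator chain as the channel being acknowledged here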
+ connHops, err := im.keeper.GetConnectionHops(ctx, portID, channelID) + if err != nil { + return err + } + + distrTransferMsg := channeltypes.NewMsgChannelOpenInit( + transfertypes.PortID, + transfertypes.Version, + channeltypes.UNORDERED, + connHops, + transfertypes.PortID, + "", // signer unused + ) + + resp, err := im.keeper.ChannelOpenInit(ctx, distrTransferMsg) + if err != nil { + return err + } + im.keeper.SetDistributionTransmissionChannel(ctx, resp.ChannelId) + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + types.EventTypeFeeTransferChannelOpened, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(channeltypes.AttributeKeyChannelID, channelID), + sdk.NewAttribute(channeltypes.AttributeKeyPortID, types.PortID), + ), + ) + + return nil +} + +// OnChanOpenConfirm implements the IBCModule interface +func (im IBCModule) OnChanOpenConfirm( + ctx sdk.Context, + _ string, + _ string, +) error { + im.keeper.Logger(ctx).Debug( + "OnChanOpenConfirm", + ) + + return errorsmod.Wrap( + commontypes.ErrInvalidChannelFlow, + "channel handshake must be initiated by subscriber chain", + ) +} + +// OnChanCloseInit implements the IBCModule interface +func (im IBCModule) OnChanCloseInit( + ctx sdk.Context, + _ string, + channelID string, +) error { + im.keeper.Logger(ctx).Debug( + "OnChanCloseInit", + ) + + // allow relayers to close duplicate OPEN channels, if the coordinator channel has already + // been established + if coordinatorChannel, ok := im.keeper.GetCoordinatorChannel(ctx); ok && + coordinatorChannel != channelID { + return nil + } + return errorsmod.Wrap(sdkerrors.ErrInvalidRequest, "user cannot close channel") +} + +// OnChanCloseConfirm implements the IBCModule interface +func (im IBCModule) OnChanCloseConfirm( + ctx sdk.Context, + _ string, + _ string, +) error { + im.keeper.Logger(ctx).Debug( + "OnChanCloseConfirm", + ) + return nil +} + +// OnRecvPacket implements the IBCModule interface +func (im IBCModule) OnRecvPacket( + ctx sdk.Context, + packet channeltypes.Packet, + _ sdk.AccAddress, +) ibcexported.Acknowledgement { + im.keeper.Logger(ctx).Debug( + "OnRecvPacket", + ) + var ( + ack ibcexported.Acknowledgement + data commontypes.ValidatorSetChangePacketData + ) + if err := types.ModuleCdc.UnmarshalJSON(packet.GetData(), &data); err != nil { + errAck := commontypes.NewErrorAcknowledgementWithLog( + ctx, fmt.Errorf("cannot unmarshal packet data"), + ) + ack = &errAck + } else { + im.keeper.Logger(ctx).Debug( + "OnRecvPacket", + "packet data", data, + ) + ack = im.keeper.OnRecvVSCPacket(ctx, packet, data) + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + commontypes.EventTypePacket, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(commontypes.AttributeKeyAckSuccess, fmt.Sprintf("%t", ack != nil)), + ), + ) + + return ack +} + +// OnAcknowledgementPacket implements the IBCModule interface +func (im IBCModule) OnAcknowledgementPacket( + ctx sdk.Context, + packet channeltypes.Packet, + acknowledgement []byte, + _ sdk.AccAddress, +) error { + im.keeper.Logger(ctx).Debug( + "OnAcknowledgementPacket", + ) + var ack channeltypes.Acknowledgement + if err := commontypes.ModuleCdc.UnmarshalJSON(acknowledgement, &ack); err != nil { + return errorsmod.Wrapf( + sdkerrors.ErrUnknownRequest, + "cannot unmarshal subscriber packet acknowledgement: %v", + err, + ) + } + + if err := im.keeper.OnAcknowledgementPacket(ctx, packet, ack); err != nil { + return err + } + + ctx.EventManager().EmitEvent( + sdk.NewEvent( + 
commontypes.EventTypePacket, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + sdk.NewAttribute(commontypes.AttributeKeyAck, ack.String()), + ), + ) + switch resp := ack.Response.(type) { + case *channeltypes.Acknowledgement_Result: + ctx.EventManager().EmitEvent( + sdk.NewEvent( + commontypes.EventTypePacket, + sdk.NewAttribute(commontypes.AttributeKeyAckSuccess, string(resp.Result)), + ), + ) + case *channeltypes.Acknowledgement_Error: + ctx.EventManager().EmitEvent( + sdk.NewEvent( + commontypes.EventTypePacket, + sdk.NewAttribute(commontypes.AttributeKeyAckError, resp.Error), + ), + ) + } + return nil +} + +// OnTimeoutPacket implements the IBCModule interface +func (im IBCModule) OnTimeoutPacket( + ctx sdk.Context, + _ channeltypes.Packet, + _ sdk.AccAddress, +) error { + im.keeper.Logger(ctx).Debug( + "OnTimeoutPacket", + ) + ctx.EventManager().EmitEvent( + sdk.NewEvent( + commontypes.EventTypeTimeout, + sdk.NewAttribute(sdk.AttributeKeyModule, types.ModuleName), + ), + ) + + return nil +} diff --git a/x/appchain/subscriber/types/errors.go b/x/appchain/subscriber/types/errors.go new file mode 100644 index 000000000..7bc9189a4 --- /dev/null +++ b/x/appchain/subscriber/types/errors.go @@ -0,0 +1,10 @@ +package types + +import ( + errorsmod "cosmossdk.io/errors" +) + +// x/subscriber module sentinel errors +var ( + ErrNoProposerChannelID = errorsmod.Register(ModuleName, 2, "no established channel") +) diff --git a/x/appchain/subscriber/types/events.go b/x/appchain/subscriber/types/events.go new file mode 100644 index 000000000..21952f56b --- /dev/null +++ b/x/appchain/subscriber/types/events.go @@ -0,0 +1,16 @@ +package types + +const ( + EventTypeFeeDistribution = "fee_distribution" + EventTypeFeeTransferChannelOpened = "fee_transfer_channel_opened" + EventTypeSubscriberSlashRequest = "subscriber_slash_request" + EventTypeVSCMatured = "vsc_matured" + + AttributeDistributionCurrentHeight = "distribution_current_height" + AttributeDistributionDenom = "distribution_denom" + AttributeDistributionFraction = "distribution_fraction" + AttributeDistributionNextHeight = "distribution_next_height" + AttributeDistributionValue = "distribution_value" + AttributeSubscriberHeight = "subscriber_height" + AttributeTimestamp = "timestamp" +) diff --git a/x/appchain/subscriber/types/genesis.go b/x/appchain/subscriber/types/genesis.go index 7d5e514e6..ca211ece9 100644 --- a/x/appchain/subscriber/types/genesis.go +++ b/x/appchain/subscriber/types/genesis.go @@ -10,7 +10,9 @@ func DefaultGenesis() *GenesisState { } // NewGenesis creates a new genesis state with the provided parameters and -// data. +// data. Since most of the genesis fields are filled by the coordinator, +// the subscriber module only needs to fill the subscriber params. +// Even those will be overwritten. 
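+// For illustration, a chain-local genesis is therefore just NewGenesis(params), where params is the +// commontypes.SubscriberParams to start with; the coordinator and IBC-related fields stay empty until +// the coordinator (or an export) fills them in.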
func NewGenesis(params commontypes.SubscriberParams) *GenesisState { return &GenesisState{Params: params} } diff --git a/x/appchain/subscriber/types/genesis.pb.go b/x/appchain/subscriber/types/genesis.pb.go index 4d3f210f0..9933621a3 100644 --- a/x/appchain/subscriber/types/genesis.pb.go +++ b/x/appchain/subscriber/types/genesis.pb.go @@ -8,15 +8,19 @@ import ( types "github.com/ExocoreNetwork/exocore/x/appchain/common/types" _ "github.com/cosmos/gogoproto/gogoproto" proto "github.com/cosmos/gogoproto/proto" + github_com_cosmos_gogoproto_types "github.com/cosmos/gogoproto/types" + _ "google.golang.org/protobuf/types/known/timestamppb" io "io" math "math" math_bits "math/bits" + time "time" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +var _ = time.Kitchen // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. @@ -26,8 +30,17 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // GenesisState is the genesis state for the appchain subscriber module. type GenesisState struct { - // Params is the parameters for the appchain subscriber module. + // The first two fields are word-for-word pulled from `common.proto`, to be + // filled by the coordinator module (or an export). + // params is the parameters for the appchain subscriber module. Params types.SubscriberParams `protobuf:"bytes,1,opt,name=params,proto3" json:"params"` + // coordinator is the coordinator information for the subscriber. + Coordinator types.CoordinatorInfo `protobuf:"bytes,2,opt,name=coordinator,proto3" json:"coordinator"` + // Below are the IBC parameters + // coordinator_client_id is the client id of the coordinator chain. + CoordinatorClientID string `protobuf:"bytes,3,opt,name=coordinator_client_id,json=coordinatorClientId,proto3" json:"coordinator_client_id,omitempty"` + // coordinator_channel_id is the channel id of the coordinator chain. + CoordinatorChannelID string `protobuf:"bytes,4,opt,name=coordinator_channel_id,json=coordinatorChannelId,proto3" json:"coordinator_channel_id,omitempty"` } func (m *GenesisState) Reset() { *m = GenesisState{} } @@ -70,8 +83,89 @@ func (m *GenesisState) GetParams() types.SubscriberParams { return types.SubscriberParams{} } +func (m *GenesisState) GetCoordinator() types.CoordinatorInfo { + if m != nil { + return m.Coordinator + } + return types.CoordinatorInfo{} +} + +func (m *GenesisState) GetCoordinatorClientID() string { + if m != nil { + return m.CoordinatorClientID + } + return "" +} + +func (m *GenesisState) GetCoordinatorChannelID() string { + if m != nil { + return m.CoordinatorChannelID + } + return "" +} + +// MaturingVSCPacket represents a vsc packet that is maturing internal to the +// subscriber module, where it has not yet relayed a VSCMatured packet back. +// While it is technically feasible to store this just as a key in the state, +// keeping it as a separate type allows exporting the genesis data. +// The key used is prefix + time + vscId. +type MaturingVSCPacket struct { + // vsc_id is the id of the VSC that is maturing. + ValidatorSetChangeID uint64 `protobuf:"varint,1,opt,name=vsc_id,json=vscId,proto3" json:"vsc_id,omitempty"` + // maturity_time is the time at which the VSC will mature. 
+ MaturityTime time.Time `protobuf:"bytes,2,opt,name=maturity_time,json=maturityTime,proto3,stdtime" json:"maturity_time"` +} + +func (m *MaturingVSCPacket) Reset() { *m = MaturingVSCPacket{} } +func (m *MaturingVSCPacket) String() string { return proto.CompactTextString(m) } +func (*MaturingVSCPacket) ProtoMessage() {} +func (*MaturingVSCPacket) Descriptor() ([]byte, []int) { + return fileDescriptor_f608de439fd2c5db, []int{1} +} +func (m *MaturingVSCPacket) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MaturingVSCPacket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MaturingVSCPacket.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MaturingVSCPacket) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaturingVSCPacket.Merge(m, src) +} +func (m *MaturingVSCPacket) XXX_Size() int { + return m.Size() +} +func (m *MaturingVSCPacket) XXX_DiscardUnknown() { + xxx_messageInfo_MaturingVSCPacket.DiscardUnknown(m) +} + +var xxx_messageInfo_MaturingVSCPacket proto.InternalMessageInfo + +func (m *MaturingVSCPacket) GetValidatorSetChangeID() uint64 { + if m != nil { + return m.ValidatorSetChangeID + } + return 0 +} + +func (m *MaturingVSCPacket) GetMaturityTime() time.Time { + if m != nil { + return m.MaturityTime + } + return time.Time{} +} + func init() { proto.RegisterType((*GenesisState)(nil), "exocore.appchain.subscriber.v1.GenesisState") + proto.RegisterType((*MaturingVSCPacket)(nil), "exocore.appchain.subscriber.v1.MaturingVSCPacket") } func init() { @@ -79,22 +173,35 @@ func init() { } var fileDescriptor_f608de439fd2c5db = []byte{ - // 232 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x49, 0xad, 0xc8, 0x4f, - 0xce, 0x2f, 0x4a, 0xd5, 0x4f, 0x2c, 0x28, 0x48, 0xce, 0x48, 0xcc, 0xcc, 0xd3, 0x2f, 0x2e, 0x4d, - 0x2a, 0x4e, 0x2e, 0xca, 0x4c, 0x4a, 0x2d, 0xd2, 0x2f, 0x33, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, - 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x83, 0xaa, 0xd6, 0x83, 0xa9, 0xd6, - 0x43, 0xa8, 0xd6, 0x2b, 0x33, 0x94, 0x52, 0xc7, 0x30, 0x2d, 0x39, 0x3f, 0x37, 0x37, 0x3f, 0x0f, - 0x64, 0x12, 0x84, 0x05, 0x31, 0x48, 0x4a, 0x24, 0x3d, 0x3f, 0x3d, 0x1f, 0xcc, 0xd4, 0x07, 0xb1, - 0x20, 0xa2, 0x4a, 0x51, 0x5c, 0x3c, 0xee, 0x10, 0xfb, 0x82, 0x4b, 0x12, 0x4b, 0x52, 0x85, 0xbc, - 0xb8, 0xd8, 0x0a, 0x12, 0x8b, 0x12, 0x73, 0x8b, 0x25, 0x18, 0x15, 0x18, 0x35, 0xb8, 0x8d, 0x74, - 0xf4, 0x30, 0xec, 0x87, 0x9a, 0x5a, 0x66, 0xa8, 0x17, 0x0c, 0x77, 0x49, 0x00, 0x58, 0x8f, 0x13, - 0xcb, 0x89, 0x7b, 0xf2, 0x0c, 0x41, 0x50, 0x13, 0x9c, 0xc2, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, - 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x09, 0x8f, 0xe5, 0x18, 0x2e, 0x3c, 0x96, 0x63, 0xb8, - 0xf1, 0x58, 0x8e, 0x21, 0xca, 0x36, 0x3d, 0xb3, 0x24, 0xa3, 0x34, 0x09, 0x64, 0x94, 0xbe, 0x2b, - 0xc4, 0x7c, 0xbf, 0xd4, 0x92, 0xf2, 0xfc, 0xa2, 0x6c, 0x7d, 0x98, 0x77, 0x2a, 0xb0, 0x06, 0x4f, - 0x49, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0xed, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, - 0x3c, 0xba, 0x2f, 0x3b, 0x4a, 0x01, 0x00, 0x00, + // 448 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0x41, 0x6f, 0xd3, 0x30, + 0x14, 0xc7, 0x9b, 0x51, 0x2a, 0xf0, 0xc6, 0x81, 0xac, 0x40, 0xd5, 0x43, 0x32, 0xed, 0xc2, 0x24, + 0x26, 0x5b, 0x85, 0x33, 0x97, 0xb6, 0x08, 0x05, 0xc4, 0x34, 0x25, 0x68, 0x48, 
0x5c, 0x2a, 0xc7, + 0xf1, 0x52, 0x6b, 0x8d, 0x1d, 0xd9, 0x6e, 0xd8, 0x3e, 0x05, 0xfb, 0x58, 0x3b, 0x70, 0xd8, 0x91, + 0x53, 0x41, 0xe9, 0x17, 0x41, 0xb6, 0x13, 0x56, 0x54, 0xb4, 0xdb, 0xf3, 0xf3, 0xff, 0xfd, 0xfc, + 0xf4, 0xff, 0x1b, 0x1c, 0xd3, 0x4b, 0x41, 0x84, 0xa4, 0x08, 0x97, 0x25, 0x99, 0x63, 0xc6, 0x91, + 0x5a, 0xa6, 0x8a, 0x48, 0x96, 0x52, 0x89, 0xaa, 0x11, 0xca, 0x29, 0xa7, 0x8a, 0x29, 0x58, 0x4a, + 0xa1, 0x85, 0x1f, 0x34, 0x6a, 0xd8, 0xaa, 0xe1, 0x9d, 0x1a, 0x56, 0xa3, 0xe1, 0xcb, 0x2d, 0x1a, + 0x11, 0x45, 0x21, 0xb8, 0x21, 0xb9, 0xca, 0x81, 0x86, 0xfd, 0x5c, 0xe4, 0xc2, 0x96, 0xc8, 0x54, + 0x4d, 0x37, 0xcc, 0x85, 0xc8, 0x17, 0x14, 0xd9, 0x53, 0xba, 0x3c, 0x47, 0x9a, 0x15, 0x54, 0x69, + 0x5c, 0x94, 0x4e, 0x70, 0xf8, 0x63, 0x07, 0xec, 0xbd, 0x77, 0x1b, 0x25, 0x1a, 0x6b, 0xea, 0x7f, + 0x00, 0xbd, 0x12, 0x4b, 0x5c, 0xa8, 0x81, 0x77, 0xe0, 0x1d, 0xed, 0xbe, 0x3e, 0x86, 0x5b, 0x1b, + 0x36, 0xef, 0x56, 0x23, 0x98, 0xfc, 0xdd, 0xf5, 0xd4, 0xce, 0x8c, 0xbb, 0x37, 0xab, 0xb0, 0x13, + 0x37, 0x04, 0x3f, 0x01, 0xbb, 0x44, 0x08, 0x99, 0x31, 0x8e, 0xb5, 0x90, 0x83, 0x1d, 0x0b, 0x7c, + 0x75, 0x1f, 0x70, 0x72, 0x27, 0x8f, 0xf8, 0xb9, 0x68, 0x78, 0x9b, 0x14, 0xff, 0x23, 0x78, 0xb6, + 0x71, 0x9c, 0x91, 0x05, 0xa3, 0x5c, 0xcf, 0x58, 0x36, 0x78, 0x70, 0xe0, 0x1d, 0x3d, 0x1e, 0xbf, + 0xa8, 0x57, 0xe1, 0xfe, 0x06, 0x66, 0x62, 0xef, 0xa3, 0x69, 0xbc, 0x4f, 0xb6, 0x9a, 0x99, 0x7f, + 0x02, 0x9e, 0xff, 0x03, 0x9b, 0x63, 0xce, 0xe9, 0xc2, 0xd0, 0xba, 0x96, 0x36, 0xa8, 0x57, 0x61, + 0x7f, 0x93, 0xe6, 0x04, 0xd1, 0x34, 0xee, 0x93, 0xed, 0x6e, 0x76, 0xf8, 0xdd, 0x03, 0x4f, 0x3f, + 0x61, 0xbd, 0x94, 0x8c, 0xe7, 0x67, 0xc9, 0xe4, 0x14, 0x93, 0x0b, 0xaa, 0x7d, 0x04, 0x7a, 0x95, + 0x22, 0x86, 0x6a, 0x3c, 0xed, 0x3a, 0xea, 0x19, 0x5e, 0xb0, 0xcc, 0x4c, 0x27, 0x54, 0x1b, 0x40, + 0x4e, 0xa3, 0x69, 0xfc, 0xb0, 0x52, 0x24, 0xca, 0xfc, 0x08, 0x3c, 0x29, 0x2c, 0x45, 0x5f, 0xcd, + 0x4c, 0x62, 0x8d, 0x75, 0x43, 0xe8, 0xe2, 0x84, 0x6d, 0x9c, 0xf0, 0x73, 0x1b, 0xe7, 0xf8, 0x91, + 0x71, 0xea, 0xfa, 0x57, 0xe8, 0xc5, 0x7b, 0xed, 0xa8, 0xb9, 0x1c, 0x7f, 0xb9, 0xa9, 0x03, 0xef, + 0xb6, 0x0e, 0xbc, 0xdf, 0x75, 0xe0, 0x5d, 0xaf, 0x83, 0xce, 0xed, 0x3a, 0xe8, 0xfc, 0x5c, 0x07, + 0x9d, 0xaf, 0x6f, 0x73, 0xa6, 0xe7, 0xcb, 0xd4, 0xb8, 0x8f, 0xde, 0xb9, 0x48, 0x4e, 0xa8, 0xfe, + 0x26, 0xe4, 0x05, 0x6a, 0x3f, 0xdd, 0xe5, 0x7f, 0x3f, 0xb1, 0xbe, 0x2a, 0xa9, 0x4a, 0x7b, 0x76, + 0x89, 0x37, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x44, 0xfd, 0x3d, 0x44, 0xf0, 0x02, 0x00, 0x00, } func (m *GenesisState) Marshal() (dAtA []byte, err error) { @@ -117,6 +224,30 @@ func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.CoordinatorChannelID) > 0 { + i -= len(m.CoordinatorChannelID) + copy(dAtA[i:], m.CoordinatorChannelID) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.CoordinatorChannelID))) + i-- + dAtA[i] = 0x22 + } + if len(m.CoordinatorClientID) > 0 { + i -= len(m.CoordinatorClientID) + copy(dAtA[i:], m.CoordinatorClientID) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.CoordinatorClientID))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Coordinator.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 { size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) if err != nil { @@ -130,6 +261,42 @@ func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *MaturingVSCPacket) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MaturingVSCPacket) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MaturingVSCPacket) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + n3, err3 := github_com_cosmos_gogoproto_types.StdTimeMarshalTo(m.MaturityTime, dAtA[i-github_com_cosmos_gogoproto_types.SizeOfStdTime(m.MaturityTime):]) + if err3 != nil { + return 0, err3 + } + i -= n3 + i = encodeVarintGenesis(dAtA, i, uint64(n3)) + i-- + dAtA[i] = 0x12 + if m.ValidatorSetChangeID != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.ValidatorSetChangeID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { offset -= sovGenesis(v) base := offset @@ -149,6 +316,30 @@ func (m *GenesisState) Size() (n int) { _ = l l = m.Params.Size() n += 1 + l + sovGenesis(uint64(l)) + l = m.Coordinator.Size() + n += 1 + l + sovGenesis(uint64(l)) + l = len(m.CoordinatorClientID) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + l = len(m.CoordinatorChannelID) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + return n +} + +func (m *MaturingVSCPacket) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValidatorSetChangeID != 0 { + n += 1 + sovGenesis(uint64(m.ValidatorSetChangeID)) + } + l = github_com_cosmos_gogoproto_types.SizeOfStdTime(m.MaturityTime) + n += 1 + l + sovGenesis(uint64(l)) return n } @@ -220,6 +411,205 @@ func (m *GenesisState) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Coordinator", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Coordinator.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CoordinatorClientID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CoordinatorClientID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CoordinatorChannelID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CoordinatorChannelID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaturingVSCPacket) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaturingVSCPacket: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaturingVSCPacket: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorSetChangeID", wireType) + } + m.ValidatorSetChangeID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValidatorSetChangeID |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaturityTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := github_com_cosmos_gogoproto_types.StdTimeUnmarshal(&m.MaturityTime, dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenesis(dAtA[iNdEx:]) diff --git a/x/appchain/subscriber/types/keys.go b/x/appchain/subscriber/types/keys.go index e67ac4d65..b5f90f12c 100644 --- a/x/appchain/subscriber/types/keys.go +++ b/x/appchain/subscriber/types/keys.go @@ -1,5 +1,12 @@ package types +import ( + time "time" + + "github.com/ExocoreNetwork/exocore/utils" + sdk "github.com/cosmos/cosmos-sdk/types" +) + const ( // ModuleName defines the module name ModuleName = "subscriber" @@ -16,15 +23,119 @@ const ( // PortID is the default port id that module binds to PortID = "subscriber" + // SubscriberRedistributeName is the name of the fee pool address that stores + // the tokens that aren't sent to the coordinator SubscriberRedistributeName = "subscriber_redistribute" + // SubscriberToSendToCoordinatorName is the name of the fee pool address that + // stores the tokens that are sent to the coordinator SubscriberToSendToCoordinatorName = "subscriber_to_send_to_coordinator" ) const ( + // FirstValsetUpdateID is the first update ID for the validator set + FirstValsetUpdateID uint64 
= 0 +) + +const ( + // ParamsBytePrefix is the prefix for the params key ParamsBytePrefix byte = iota + 1 + // PortBytePrefix is the prefix for the port key + PortBytePrefix + // CoordinatorClientIDBytePrefix is the prefix for the coordinator client ID key + CoordinatorClientIDBytePrefix + // ValsetUpdateIDBytePrefix is the prefix for the valset update ID key + ValsetUpdateIDBytePrefix + // SubscriberValidatorBytePrefix is the prefix for the subscriber validator key + SubscriberValidatorBytePrefix + // CoordinatorChannelBytePrefix is the prefix for the coordinator channel key + CoordinatorChannelBytePrefix + // PendingChangesBytePrefix is the prefix for the pending changes key + PendingChangesBytePrefix + // PacketMaturityTimeBytePrefix is the prefix for the packet maturity time key + PacketMaturityTimeBytePrefix + // OutstandingDowntimeBytePrefix is the prefix for the outstanding downtime key + OutstandingDowntimeBytePrefix + // PendingPacketsIndexBytePrefix is the prefix for the pending packets index key + PendingPacketsIndexBytePrefix + // PendingDataPacketsBytePrefix is the prefix for the pending data packets key + PendingDataPacketsBytePrefix + // HistoricalInfoBytePrefix is the prefix for the historical info key + HistoricalInfoBytePrefix + // LastRewardTransmissionHeightBytePrefix is the prefix for the last reward transmission height key + LastRewardTransmissionHeightBytePrefix ) +// ParamsKey returns the key for the params func ParamsKey() []byte { return []byte{ParamsBytePrefix} } + +// PortKey returns the key for the port (hello Harry Potter!) +func PortKey() []byte { + return []byte{PortBytePrefix} +} + +// CoordinatorClientIDKey returns the key for the coordinator client ID +func CoordinatorClientIDKey() []byte { + return []byte{CoordinatorClientIDBytePrefix} +} + +// ValsetUpdateIDKey returns the key for the valset update ID against the provided height. +func ValsetUpdateIDKey(height int64) []byte { + return append( + []byte{ValsetUpdateIDBytePrefix}, + sdk.Uint64ToBigEndian(uint64(height))..., + ) +} + +// SubscriberValidatorKey returns the key for the subscriber chain validator +// against the provided address. +func SubscriberValidatorKey(address sdk.ConsAddress) []byte { + return append([]byte{SubscriberValidatorBytePrefix}, address...) +} + +// CoordinatorChannelKey returns the key for which the ibc channel id to the coordinator chain +// is stored. +func CoordinatorChannelKey() []byte { + return []byte{CoordinatorChannelBytePrefix} +} + +// PendingChangesKey returns the key for the pending changes +func PendingChangesKey() []byte { + return []byte{PendingChangesBytePrefix} +} + +// PacketMaturityTimeKey returns the key for the packet maturity time +func PacketMaturityTimeKey(maturityTime time.Time, vscID uint64) []byte { + return utils.AppendMany( + []byte{PacketMaturityTimeBytePrefix}, + sdk.FormatTimeBytes(maturityTime), + sdk.Uint64ToBigEndian(vscID), + ) +} + +// OutstandingDowntimeKey returns the key for the outstanding downtime +func OutstandingDowntimeKey(consAddress sdk.ConsAddress) []byte { + return append([]byte{OutstandingDowntimeBytePrefix}, consAddress.Bytes()...) +} + +// PendingPacketsIndexKey returns the key for the pending packets index +func PendingPacketsIndexKey() []byte { + return []byte{PendingPacketsIndexBytePrefix} +} + +// PendingDataPacketsKey returns the key for the pending data packets +func PendingDataPacketsKey(idx uint64) []byte { + return append([]byte{PendingDataPacketsBytePrefix}, sdk.Uint64ToBigEndian(idx)...) 
+} + +// HistoricalInfoKey returns the key for the historical info +func HistoricalInfoKey(height int64) []byte { + return append([]byte{HistoricalInfoBytePrefix}, sdk.Uint64ToBigEndian(uint64(height))...) +} + +// LastRewardTransmissionHeightKey is the key for the last reward transmission height +func LastRewardTransmissionHeightKey() []byte { + return []byte{LastRewardTransmissionHeightBytePrefix} +} diff --git a/x/appchain/subscriber/types/types.go b/x/appchain/subscriber/types/types.go index ab1254f4c..edc465b46 100644 --- a/x/appchain/subscriber/types/types.go +++ b/x/appchain/subscriber/types/types.go @@ -1 +1,11 @@ package types + +import ( + commontypes "github.com/ExocoreNetwork/exocore/x/appchain/common/types" +) + +// SubscriberPacketDataWithIdx is a wrapper struct for SubscriberPacketData with an index. +type SubscriberPacketDataWithIdx struct { + commontypes.SubscriberPacketData + Idx uint64 +} diff --git a/x/avs/keeper/avs.go b/x/avs/keeper/avs.go index 88029157b..443a32bfb 100644 --- a/x/avs/keeper/avs.go +++ b/x/avs/keeper/avs.go @@ -80,6 +80,20 @@ func (k *Keeper) GetEpochEndAVSs(ctx sdk.Context, epochIdentifier string, ending return avsList } +// GetEpochEndChainIDs returns a list of chainIDs for AVSs which are scheduled to start at the end of the +// current epoch, or the beginning of the next one. The chainIDs used are without the revision number. +func (k Keeper) GetEpochEndChainIDs(ctx sdk.Context, epochIdentifier string, endingEpochNumber int64) []string { + avsList := k.GetEpochEndAVSs(ctx, epochIdentifier, endingEpochNumber) + var chainIDs []string + for _, avsAddr := range avsList { + chainID, found := k.GetChainIDByAVSAddr(ctx, avsAddr) + if found { + chainIDs = append(chainIDs, chainID) + } + } + return chainIDs +} + // GetAVSInfoByTaskAddress returns the AVS which containing this task address // A task contract address can only be used by one avs // TODO:this function is frequently used while its implementation iterates over existing avs to find the target avs by task contract address, we should use a reverse mapping to avoid iteration @@ -125,13 +139,15 @@ func (k *Keeper) GetTaskStatisticalEpochEndAVSs(ctx sdk.Context, epochIdentifier // AssetIDs, EpochsUntilUnbonded, EpochIdentifier, MinSelfDelegation and StartingEpoch. // This will ensure compatibility with all of the related AVS functions, like // GetEpochEndAVSs, GetAVSSupportedAssets, and GetAVSMinimumSelfDelegation. +// The caller must use a chainID without the revision number if the AVS is intended +// to outlive the upgrades, for example, the x/dogfood AVS. The same cannot be said +// for x/appchain AVSs wherein the post-upgrade operators may not be the same. 
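+// For example, the x/dogfood AVS registers with utils.ChainIDWithoutRevision(ctx.ChainID()), e.g. +// "exocoretestnet_233" rather than "exocoretestnet_233-1", so that its AVS address stays stable across +// upgrades; an x/appchain AVS may instead register with the full, revisioned chainID.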
func (k Keeper) RegisterAVSWithChainID( oCtx sdk.Context, params *types.AVSRegisterOrDeregisterParams, ) (avsAddr common.Address, err error) { // guard against errors ctx, writeFunc := oCtx.CacheContext() // remove the version number and validate - params.ChainID = types.ChainIDWithoutRevision(params.ChainID) if len(params.ChainID) == 0 { return common.Address{}, errorsmod.Wrap(types.ErrNotNull, "RegisterAVSWithChainID: chainID is null") } diff --git a/x/avs/keeper/keeper.go b/x/avs/keeper/keeper.go index 50265144d..93319a971 100644 --- a/x/avs/keeper/keeper.go +++ b/x/avs/keeper/keeper.go @@ -13,6 +13,7 @@ import ( errorsmod "cosmossdk.io/errors" + "github.com/ExocoreNetwork/exocore/utils" delegationtypes "github.com/ExocoreNetwork/exocore/x/delegation/types" "github.com/cometbft/cometbft/libs/log" "github.com/cosmos/cosmos-sdk/codec" @@ -91,7 +92,7 @@ func (k Keeper) UpdateAVSInfo(ctx sdk.Context, params *types.AVSRegisterOrDeregi return errorsmod.Wrap(types.ErrAlreadyRegistered, fmt.Sprintf("this TaskAddr has already been used by other AVS,the TaskAddr is :%s", params.TaskAddr)) } startingEpoch := uint64(epoch.CurrentEpoch + 1) - if params.ChainID == types.ChainIDWithoutRevision(ctx.ChainID()) { + if params.ChainID == utils.ChainIDWithoutRevision(ctx.ChainID()) { // TODO: handle this better startingEpoch = uint64(epoch.CurrentEpoch) } diff --git a/x/avs/keeper/query.go b/x/avs/keeper/query.go index 94b32532f..896a75ccf 100644 --- a/x/avs/keeper/query.go +++ b/x/avs/keeper/query.go @@ -4,6 +4,7 @@ import ( "context" "strconv" + "github.com/ExocoreNetwork/exocore/utils" "github.com/ExocoreNetwork/exocore/x/avs/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -23,7 +24,7 @@ func (k Keeper) QueryAVSTaskInfo(ctx context.Context, req *types.QueryAVSTaskInf // QueryAVSAddrByChainID is an implementation of the QueryAVSAddrByChainID gRPC method func (k Keeper) QueryAVSAddrByChainID(ctx context.Context, req *types.QueryAVSAddrByChainIDReq) (*types.QueryAVSAddrByChainIDResponse, error) { c := sdk.UnwrapSDKContext(ctx) - isChainAvs, avsAddr := k.IsAVSByChainID(c, types.ChainIDWithoutRevision(req.ChainID)) + isChainAvs, avsAddr := k.IsAVSByChainID(c, utils.ChainIDWithoutRevision(req.ChainID)) if !isChainAvs { return nil, types.ErrNotYetRegistered } diff --git a/x/avs/types/types.go b/x/avs/types/types.go index 5a031c568..9c8d3cba4 100644 --- a/x/avs/types/types.go +++ b/x/avs/types/types.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum/go-ethereum/accounts/abi" - ibcclienttypes "github.com/cosmos/ibc-go/v7/modules/core/02-client/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/crypto" @@ -88,16 +87,6 @@ var ( } ) -// ChainIDWithoutRevision returns the chainID without the revision number. -// For example, "exocoretestnet_233-1" returns "exocoretestnet_233". -func ChainIDWithoutRevision(chainID string) string { - if !ibcclienttypes.IsRevisionFormat(chainID) { - return chainID - } - splitStr := strings.Split(chainID, "-") - return splitStr[0] -} - // GenerateAVSAddr generates a hex AVS address based on the chainID. // It returns a hex address as a string. 
func GenerateAVSAddr(chainID string) string { diff --git a/x/dogfood/keeper/abci.go b/x/dogfood/keeper/abci.go index 067b952f9..e7f6b5a78 100644 --- a/x/dogfood/keeper/abci.go +++ b/x/dogfood/keeper/abci.go @@ -4,7 +4,6 @@ import ( "cosmossdk.io/math" keytypes "github.com/ExocoreNetwork/exocore/types/keys" "github.com/ExocoreNetwork/exocore/utils" - avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" abci "github.com/cometbft/cometbft/abci/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -20,7 +19,7 @@ func (k Keeper) EndBlock(ctx sdk.Context) []abci.ValidatorUpdate { return []abci.ValidatorUpdate{} } defer k.ClearEpochEnd(ctx) - chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(ctx.ChainID()) + chainIDWithoutRevision := utils.ChainIDWithoutRevision(ctx.ChainID()) // start by clearing the previous consensus keys for the chain. // each AVS can have a separate epoch and hence this function is a part of this module // and not the operator module. diff --git a/x/dogfood/keeper/genesis.go b/x/dogfood/keeper/genesis.go index bf6889cb5..24a111178 100644 --- a/x/dogfood/keeper/genesis.go +++ b/x/dogfood/keeper/genesis.go @@ -4,6 +4,7 @@ import ( "fmt" keytypes "github.com/ExocoreNetwork/exocore/types/keys" + "github.com/ExocoreNetwork/exocore/utils" avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" "github.com/ExocoreNetwork/exocore/x/dogfood/types" abci "github.com/cometbft/cometbft/abci/types" @@ -36,7 +37,7 @@ func (k Keeper) InitGenesis( var err error // the avs module will remove the revision by itself, but we do it here anyway because we need it // to look up operator registration status after this - which is keyed by chainID without revision. - chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(ctx.ChainID()) + chainIDWithoutRevision := utils.ChainIDWithoutRevision(ctx.ChainID()) if avsAddr, err = k.avsKeeper.RegisterAVSWithChainID(ctx, &avstypes.AVSRegisterOrDeregisterParams{ AvsName: chainIDWithoutRevision, AssetID: genState.Params.AssetIDs, diff --git a/x/dogfood/keeper/impl_delegation_hooks.go b/x/dogfood/keeper/impl_delegation_hooks.go index 9d7c866cf..b873d574a 100644 --- a/x/dogfood/keeper/impl_delegation_hooks.go +++ b/x/dogfood/keeper/impl_delegation_hooks.go @@ -4,7 +4,7 @@ import ( "fmt" keytypes "github.com/ExocoreNetwork/exocore/types/keys" - avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" + "github.com/ExocoreNetwork/exocore/utils" delegationtypes "github.com/ExocoreNetwork/exocore/x/delegation/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -37,7 +37,7 @@ func (wrapper DelegationHooksWrapper) AfterDelegation( func (wrapper DelegationHooksWrapper) AfterUndelegationStarted( ctx sdk.Context, operator sdk.AccAddress, recordKey []byte, ) error { - chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(ctx.ChainID()) + chainIDWithoutRevision := utils.ChainIDWithoutRevision(ctx.ChainID()) var unbondingCompletionEpoch int64 if wrapper.keeper.operatorKeeper.IsOperatorRemovingKeyFromChainID( ctx, operator, chainIDWithoutRevision, diff --git a/x/dogfood/keeper/impl_epochs_hooks_test.go b/x/dogfood/keeper/impl_epochs_hooks_test.go index aa696c0b4..23f5601fc 100644 --- a/x/dogfood/keeper/impl_epochs_hooks_test.go +++ b/x/dogfood/keeper/impl_epochs_hooks_test.go @@ -4,9 +4,9 @@ import ( sdkmath "cosmossdk.io/math" utiltx "github.com/ExocoreNetwork/exocore/testutil/tx" keytypes "github.com/ExocoreNetwork/exocore/types/keys" + "github.com/ExocoreNetwork/exocore/utils" assetskeeper "github.com/ExocoreNetwork/exocore/x/assets/keeper" assetstypes 
"github.com/ExocoreNetwork/exocore/x/assets/types" - avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" delegationtypes "github.com/ExocoreNetwork/exocore/x/delegation/types" operatortypes "github.com/ExocoreNetwork/exocore/x/operator/types" sdk "github.com/cosmos/cosmos-sdk/types" @@ -72,7 +72,7 @@ func (suite *KeeperTestSuite) TestSameEpochOperations() { // generate keys, and get the AVS address oldKey := utiltx.GenerateConsensusKey() newKey := utiltx.GenerateConsensusKey() - chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(suite.Ctx.ChainID()) + chainIDWithoutRevision := utils.ChainIDWithoutRevision(suite.Ctx.ChainID()) _, avsAddress := suite.App.AVSManagerKeeper.IsAVSByChainID(suite.Ctx, chainIDWithoutRevision) // now define the operations @@ -252,7 +252,7 @@ func (suite *KeeperTestSuite) TestDifferentEpochOperations() { // generate keys, and get the AVS address oldKey := utiltx.GenerateConsensusKey() newKey := utiltx.GenerateConsensusKey() - chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(suite.Ctx.ChainID()) + chainIDWithoutRevision := utils.ChainIDWithoutRevision(suite.Ctx.ChainID()) _, avsAddress := suite.App.AVSManagerKeeper.IsAVSByChainID(suite.Ctx, chainIDWithoutRevision) // now define the operations diff --git a/x/dogfood/keeper/impl_operator_hooks.go b/x/dogfood/keeper/impl_operator_hooks.go index 7a9d284d8..c67ed6eff 100644 --- a/x/dogfood/keeper/impl_operator_hooks.go +++ b/x/dogfood/keeper/impl_operator_hooks.go @@ -2,7 +2,7 @@ package keeper import ( keytypes "github.com/ExocoreNetwork/exocore/types/keys" - avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" + "github.com/ExocoreNetwork/exocore/utils" operatortypes "github.com/ExocoreNetwork/exocore/x/operator/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -48,7 +48,7 @@ func (h OperatorHooksWrapper) AfterOperatorKeyReplaced( // 3. X epochs later, the reverse lookup of old cons addr + chain id -> operator addr // should be cleared. consAddr := oldKey.ToConsAddr() - if chainID == avstypes.ChainIDWithoutRevision(ctx.ChainID()) { + if chainID == utils.ChainIDWithoutRevision(ctx.ChainID()) { // is the oldKey already active? if not, we should not do anything. // this can happen if we opt in with a key, then replace it with another key // during the same epoch. @@ -80,7 +80,7 @@ func (h OperatorHooksWrapper) AfterOperatorKeyRemovalInitiated( // keys from the chain. // 2. X epochs later, the removal is marked complete in the operator module. consAddr := key.ToConsAddr() - if chainID == avstypes.ChainIDWithoutRevision(ctx.ChainID()) { + if chainID == utils.ChainIDWithoutRevision(ctx.ChainID()) { _, found := h.keeper.GetExocoreValidator(ctx, consAddr) if found { h.keeper.SetOptOutInformation(ctx, operator) diff --git a/x/dogfood/keeper/impl_sdk.go b/x/dogfood/keeper/impl_sdk.go index 29e56981e..be1623996 100644 --- a/x/dogfood/keeper/impl_sdk.go +++ b/x/dogfood/keeper/impl_sdk.go @@ -4,6 +4,7 @@ import ( "sort" "cosmossdk.io/math" + "github.com/ExocoreNetwork/exocore/utils" avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" abci "github.com/cometbft/cometbft/abci/types" sdk "github.com/cosmos/cosmos-sdk/types" @@ -74,7 +75,7 @@ func (k Keeper) ValidatorByConsAddr( // a call to Validator(ctx, addr) in the slashing module, which is implemented in this file. // after that call, the ConsPubKey is fetched, which is also set by the below call. 
val, found := k.operatorKeeper.ValidatorByConsAddrForChainID( - ctx, addr, avstypes.ChainIDWithoutRevision(ctx.ChainID()), + ctx, addr, utils.ChainIDWithoutRevision(ctx.ChainID()), ) if !found { return nil @@ -104,7 +105,7 @@ func (k Keeper) SlashWithInfractionReason( slashFactor sdk.Dec, infraction stakingtypes.Infraction, ) math.Int { found, accAddress := k.operatorKeeper.GetOperatorAddressForChainIDAndConsAddr( - ctx, avstypes.ChainIDWithoutRevision(ctx.ChainID()), addr, + ctx, utils.ChainIDWithoutRevision(ctx.ChainID()), addr, ) if !found { // TODO(mm): already slashed and removed from the set? @@ -122,7 +123,7 @@ func (k Keeper) SlashWithInfractionReason( // It delegates the call to the operator module. Alternatively, this may be handled // by the slashing module depending upon the design decisions. func (k Keeper) Jail(ctx sdk.Context, addr sdk.ConsAddress) { - k.operatorKeeper.Jail(ctx, addr, avstypes.ChainIDWithoutRevision(ctx.ChainID())) + k.operatorKeeper.Jail(ctx, addr, utils.ChainIDWithoutRevision(ctx.ChainID())) // TODO(mm) // once the operator module jails someone, a hook should be triggered // and the validator removed from the set. same for unjailing. @@ -133,7 +134,7 @@ func (k Keeper) Jail(ctx sdk.Context, addr sdk.ConsAddress) { // operator to do so. TODO(mm): We need to use the SDK's slashing module to allow for downtime // slashing but somehow we need to prevent its Unjail function from being called by anyone. func (k Keeper) Unjail(ctx sdk.Context, addr sdk.ConsAddress) { - k.operatorKeeper.Unjail(ctx, addr, avstypes.ChainIDWithoutRevision(ctx.ChainID())) + k.operatorKeeper.Unjail(ctx, addr, utils.ChainIDWithoutRevision(ctx.ChainID())) } // Delegation is an implementation of the staking interface expected by the SDK's slashing @@ -146,16 +147,23 @@ func (k Keeper) Delegation( // This interface is only used for unjail to retrieve the self delegation value, // so the delegator and validator are the same. operator := delegator - avsAddr := avstypes.GenerateAVSAddr(avstypes.ChainIDWithoutRevision(ctx.ChainID())) - operatorUSDValues, err := k.operatorKeeper.GetOrCalculateOperatorUSDValues(ctx, operator, avsAddr) + avsAddr := avstypes.GenerateAVSAddr(utils.ChainIDWithoutRevision(ctx.ChainID())) + operatorUSDValues, err := k.operatorKeeper.GetOrCalculateOperatorUSDValues( + ctx, operator, avsAddr, + ) if err != nil { - k.Logger(ctx).Error("Delegation: failed to get or calculate the operator USD values", "operator", operator.String(), "chainID", ctx.ChainID(), "error", err) + k.Logger(ctx).Error( + "Delegation: failed to get or calculate the operator USD values", + "operator", operator.String(), "chainID", ctx.ChainID(), "error", err, + ) return nil } return stakingtypes.Delegation{ DelegatorAddress: delegator.String(), ValidatorAddress: validator.String(), - Shares: sdk.TokensFromConsensusPower(operatorUSDValues.SelfUSDValue.TruncateInt64(), sdk.DefaultPowerReduction).ToLegacyDec(), + Shares: sdk.TokensFromConsensusPower( + operatorUSDValues.SelfUSDValue.TruncateInt64(), sdk.DefaultPowerReduction, + ).ToLegacyDec(), } } @@ -177,7 +185,7 @@ func (k Keeper) GetAllValidators(sdk.Context) (validators []stakingtypes.Validat // slashing module. It is called by the slashing module to record validator signatures // for downtime tracking. We delegate the call to the operator keeper. 
func (k Keeper) IsValidatorJailed(ctx sdk.Context, addr sdk.ConsAddress) bool { - return k.operatorKeeper.IsOperatorJailedForChainID(ctx, addr, avstypes.ChainIDWithoutRevision(ctx.ChainID())) + return k.operatorKeeper.IsOperatorJailedForChainID(ctx, addr, utils.ChainIDWithoutRevision(ctx.ChainID())) } // ApplyAndReturnValidatorSetUpdates is an implementation of the staking interface expected @@ -207,7 +215,7 @@ func (k Keeper) IterateBondedValidatorsByPower( continue } val, found := k.operatorKeeper.ValidatorByConsAddrForChainID( - ctx, sdk.GetConsAddress(pk), avstypes.ChainIDWithoutRevision(ctx.ChainID()), + ctx, sdk.GetConsAddress(pk), utils.ChainIDWithoutRevision(ctx.ChainID()), ) if !found { ctx.Logger().Error("Operator address not found; skipping", "consAddress", sdk.GetConsAddress(pk), "i", i) diff --git a/x/dogfood/keeper/keeper.go b/x/dogfood/keeper/keeper.go index 7198320fa..e50710309 100644 --- a/x/dogfood/keeper/keeper.go +++ b/x/dogfood/keeper/keeper.go @@ -17,6 +17,8 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/ExocoreNetwork/exocore/x/dogfood/types" + + "github.com/ExocoreNetwork/exocore/utils" ) type ( @@ -111,13 +113,13 @@ func (k Keeper) ClearEpochEnd(ctx sdk.Context) { } func (k Keeper) mustValidateFields() { - types.PanicIfNil(k.storeKey, "storeKey") - types.PanicIfNil(k.cdc, "cdc") - types.PanicIfNil(k.epochsKeeper, "epochsKeeper") - types.PanicIfNil(k.operatorKeeper, "operatorKeeper") - types.PanicIfNil(k.delegationKeeper, "delegationKeeper") - types.PanicIfNil(k.restakingKeeper, "restakingKeeper") - types.PanicIfNil(k.avsKeeper, "avsKeeper") + utils.PanicIfNil(k.storeKey, "storeKey") + utils.PanicIfNil(k.cdc, "cdc") + utils.PanicIfNil(k.epochsKeeper, "epochsKeeper") + utils.PanicIfNil(k.operatorKeeper, "operatorKeeper") + utils.PanicIfNil(k.delegationKeeper, "delegationKeeper") + utils.PanicIfNil(k.restakingKeeper, "restakingKeeper") + utils.PanicIfNil(k.avsKeeper, "avsKeeper") // ensure authority is a valid bech32 address if _, err := sdk.AccAddressFromBech32(k.authority); err != nil { panic(fmt.Sprintf("authority address %s is invalid: %s", k.authority, err)) diff --git a/x/dogfood/keeper/msg_server.go b/x/dogfood/keeper/msg_server.go index 63a810670..44289e4bb 100644 --- a/x/dogfood/keeper/msg_server.go +++ b/x/dogfood/keeper/msg_server.go @@ -115,9 +115,13 @@ func (k Keeper) UpdateParams( k.SetParams(c, nextParams) // update the related info in the AVS module - isAVS, avsAddr := k.avsKeeper.IsAVSByChainID(c, avstypes.ChainIDWithoutRevision(c.ChainID())) + isAVS, avsAddr := k.avsKeeper.IsAVSByChainID( + c, utils.ChainIDWithoutRevision(c.ChainID()), + ) if !isAVS { - return nil, errors.Wrapf(types.ErrNotAVSByChainID, "chainID:%s avsAddr:%s", c.ChainID(), avsAddr) + return nil, errors.Wrapf( + types.ErrNotAVSByChainID, "chainID:%s avsAddr:%s", c.ChainID(), avsAddr, + ) } err := k.avsKeeper.UpdateAVSInfo(c, &avstypes.AVSRegisterOrDeregisterParams{ AvsName: c.ChainID(), diff --git a/x/dogfood/keeper/opt_out_test.go b/x/dogfood/keeper/opt_out_test.go index c69f7e71d..b403902ee 100644 --- a/x/dogfood/keeper/opt_out_test.go +++ b/x/dogfood/keeper/opt_out_test.go @@ -5,9 +5,9 @@ import ( utiltx "github.com/ExocoreNetwork/exocore/testutil/tx" keytypes "github.com/ExocoreNetwork/exocore/types/keys" + "github.com/ExocoreNetwork/exocore/utils" assetskeeper "github.com/ExocoreNetwork/exocore/x/assets/keeper" assetstypes "github.com/ExocoreNetwork/exocore/x/assets/types" - avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" delegationtypes 
"github.com/ExocoreNetwork/exocore/x/delegation/types" operatorkeeper "github.com/ExocoreNetwork/exocore/x/operator/keeper" operatortypes "github.com/ExocoreNetwork/exocore/x/operator/types" @@ -30,7 +30,7 @@ func (suite *KeeperTestSuite) TestBasicOperations() { suite.CheckLengthOfValidatorUpdates(0, nil, "register operator but don't opt in") // opt-in with a key - chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(suite.Ctx.ChainID()) + chainIDWithoutRevision := utils.ChainIDWithoutRevision(suite.Ctx.ChainID()) _, avsAddress := suite.App.AVSManagerKeeper.IsAVSByChainID(suite.Ctx, chainIDWithoutRevision) key := utiltx.GenerateConsensusKey() _, err = suite.OperatorMsgServer.OptIntoAVS(sdk.WrapSDKContext(suite.Ctx), &operatortypes.OptIntoAVSReq{ diff --git a/x/dogfood/keeper/unbonding_test.go b/x/dogfood/keeper/unbonding_test.go index 195b01133..4b9999778 100644 --- a/x/dogfood/keeper/unbonding_test.go +++ b/x/dogfood/keeper/unbonding_test.go @@ -3,9 +3,9 @@ package keeper_test import ( sdkmath "cosmossdk.io/math" utiltx "github.com/ExocoreNetwork/exocore/testutil/tx" + "github.com/ExocoreNetwork/exocore/utils" assetskeeper "github.com/ExocoreNetwork/exocore/x/assets/keeper" assetstypes "github.com/ExocoreNetwork/exocore/x/assets/types" - avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" delegationtypes "github.com/ExocoreNetwork/exocore/x/delegation/types" operatortypes "github.com/ExocoreNetwork/exocore/x/operator/types" sdk "github.com/cosmos/cosmos-sdk/types" @@ -67,7 +67,7 @@ func (suite *KeeperTestSuite) TestUndelegations() { suite.NoError(err) // opt in oldKey := utiltx.GenerateConsensusKey() - chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(suite.Ctx.ChainID()) + chainIDWithoutRevision := utils.ChainIDWithoutRevision(suite.Ctx.ChainID()) _, avsAddress := suite.App.AVSManagerKeeper.IsAVSByChainID(suite.Ctx, chainIDWithoutRevision) _, err = suite.OperatorMsgServer.OptIntoAVS( sdk.WrapSDKContext(suite.Ctx), @@ -228,7 +228,7 @@ func (suite *KeeperTestSuite) TestUndelegationEdgeCases() { suite.CheckLengthOfValidatorUpdates(0, []int64{}, "undelegate without opt in") // opt in oldKey := utiltx.GenerateConsensusKey() - chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(suite.Ctx.ChainID()) + chainIDWithoutRevision := utils.ChainIDWithoutRevision(suite.Ctx.ChainID()) _, avsAddress := suite.App.AVSManagerKeeper.IsAVSByChainID(suite.Ctx, chainIDWithoutRevision) _, err = suite.OperatorMsgServer.OptIntoAVS( sdk.WrapSDKContext(suite.Ctx), diff --git a/x/dogfood/keeper/validators.go b/x/dogfood/keeper/validators.go index 9cac15e7e..c638e5a71 100644 --- a/x/dogfood/keeper/validators.go +++ b/x/dogfood/keeper/validators.go @@ -6,7 +6,7 @@ import ( "cosmossdk.io/math" keytypes "github.com/ExocoreNetwork/exocore/types/keys" - avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" + "github.com/ExocoreNetwork/exocore/utils" "github.com/ExocoreNetwork/exocore/x/dogfood/types" abci "github.com/cometbft/cometbft/abci/types" cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" @@ -70,7 +70,7 @@ func (k Keeper) ApplyValidatorChanges( // via stakingkeeeper.Validator(ctx, valAddr) // then it fetches the cons pub key from said validator to generate the lookup found, accAddress := k.operatorKeeper.GetOperatorAddressForChainIDAndConsAddr( - ctx, avstypes.ChainIDWithoutRevision(ctx.ChainID()), addr, + ctx, utils.ChainIDWithoutRevision(ctx.ChainID()), addr, ) if !found { // should never happen @@ -341,13 +341,13 @@ func (k Keeper) GetValidator( ) (stakingtypes.Validator, bool) { 
accAddr := sdk.AccAddress(valAddr) found, wrappedKey, err := k.operatorKeeper.GetOperatorConsKeyForChainID( - ctx, accAddr, avstypes.ChainIDWithoutRevision(ctx.ChainID()), + ctx, accAddr, utils.ChainIDWithoutRevision(ctx.ChainID()), ) if !found || err != nil || wrappedKey == nil { return stakingtypes.Validator{}, false } val, found := k.operatorKeeper.ValidatorByConsAddrForChainID( - ctx, wrappedKey.ToConsAddr(), avstypes.ChainIDWithoutRevision(ctx.ChainID()), + ctx, wrappedKey.ToConsAddr(), utils.ChainIDWithoutRevision(ctx.ChainID()), ) if !found { return stakingtypes.Validator{}, false diff --git a/x/dogfood/types/utils.go b/x/dogfood/types/utils.go index e7ca01047..6c613e4c6 100644 --- a/x/dogfood/types/utils.go +++ b/x/dogfood/types/utils.go @@ -14,9 +14,3 @@ func RemoveFromBytesList(list [][]byte, addr []byte) [][]byte { } panic("address not found in list") } - -func PanicIfNil(x interface{}, msg string) { - if x == nil { - panic("zero or nil value for " + msg) - } -} diff --git a/x/feedistribution/keeper/allocation.go b/x/feedistribution/keeper/allocation.go index 2c86585bf..bea356770 100644 --- a/x/feedistribution/keeper/allocation.go +++ b/x/feedistribution/keeper/allocation.go @@ -4,7 +4,7 @@ import ( "sort" "cosmossdk.io/math" - avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" + "github.com/ExocoreNetwork/exocore/utils" "github.com/ExocoreNetwork/exocore/x/feedistribution/types" sdk "github.com/cosmos/cosmos-sdk/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" @@ -47,7 +47,7 @@ func (k Keeper) AllocateTokens(ctx sdk.Context, totalPreviousPower int64) error continue } validatorDetail, found := k.StakingKeeper.ValidatorByConsAddrForChainID( - ctx, sdk.GetConsAddress(pk), avstypes.ChainIDWithoutRevision(ctx.ChainID()), + ctx, sdk.GetConsAddress(pk), utils.ChainIDWithoutRevision(ctx.ChainID()), ) if !found { logger.Error("Operator address not found; skipping", "consAddress", sdk.GetConsAddress(pk), "i", i) diff --git a/x/feedistribution/keeper/hooks_test.go b/x/feedistribution/keeper/hooks_test.go index 1923bb342..5b3550376 100644 --- a/x/feedistribution/keeper/hooks_test.go +++ b/x/feedistribution/keeper/hooks_test.go @@ -10,9 +10,9 @@ import ( utiltx "github.com/ExocoreNetwork/exocore/testutil/tx" keytypes "github.com/ExocoreNetwork/exocore/types/keys" + "github.com/ExocoreNetwork/exocore/utils" assetskeeper "github.com/ExocoreNetwork/exocore/x/assets/keeper" assetstypes "github.com/ExocoreNetwork/exocore/x/assets/types" - avstypes "github.com/ExocoreNetwork/exocore/x/avs/types" delegationtypes "github.com/ExocoreNetwork/exocore/x/delegation/types" operatorkeeper "github.com/ExocoreNetwork/exocore/x/operator/keeper" operatortypes "github.com/ExocoreNetwork/exocore/x/operator/types" @@ -39,7 +39,7 @@ func (suite *KeeperTestSuite) TestEpochHooks() { continue } validatorDetail, found := suite.App.StakingKeeper.ValidatorByConsAddrForChainID( - suite.Ctx, sdk.GetConsAddress(pk), avstypes.ChainIDWithoutRevision(suite.Ctx.ChainID()), + suite.Ctx, sdk.GetConsAddress(pk), utils.ChainIDWithoutRevision(suite.Ctx.ChainID()), ) if !found { suite.Ctx.Logger().Error("Operator address not found; skipping", "consAddress", sdk.GetConsAddress(pk), "i", i) @@ -73,7 +73,7 @@ func (suite *KeeperTestSuite) prepare() { suite.CheckLengthOfValidatorUpdates(0, nil, "register operator but don't opt in") // opt-in with a key - chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(suite.Ctx.ChainID()) + chainIDWithoutRevision := utils.ChainIDWithoutRevision(suite.Ctx.ChainID()) _, 
diff --git a/x/operator/keeper/consensus_keys.go b/x/operator/keeper/consensus_keys.go
index 3818d7537..8d21318c0 100644
--- a/x/operator/keeper/consensus_keys.go
+++ b/x/operator/keeper/consensus_keys.go
@@ -14,14 +14,15 @@ import (
 	stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
 
 	keytypes "github.com/ExocoreNetwork/exocore/types/keys"
+	"github.com/ExocoreNetwork/exocore/utils"
 	delegationtypes "github.com/ExocoreNetwork/exocore/x/delegation/types"
 	"github.com/ExocoreNetwork/exocore/x/operator/types"
-
 	tmprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
 )
 
 // This file indexes by chainID and not the avs address.
-// The caller must ensure that the chainID is without the revision number.
+// The chainID may or may not have a revision, depending on whether the AVS is
+// expected to outlast its upgrades, particularly with the same set of operators.
 
 func (k *Keeper) Logger(ctx sdk.Context) log.Logger {
 	return ctx.Logger().With("module", fmt.Sprintf("x/%s", types.ModuleName))
diff --git a/x/operator/keeper/grpc_query.go b/x/operator/keeper/grpc_query.go
index 1273cff3b..41302ffa4 100644
--- a/x/operator/keeper/grpc_query.go
+++ b/x/operator/keeper/grpc_query.go
@@ -4,10 +4,10 @@ import (
 	"context"
 	"errors"
 
+	"github.com/ExocoreNetwork/exocore/utils"
 	assetstype "github.com/ExocoreNetwork/exocore/x/assets/types"
 
 	keytypes "github.com/ExocoreNetwork/exocore/types/keys"
-	avstypes "github.com/ExocoreNetwork/exocore/x/avs/types"
 	"github.com/ExocoreNetwork/exocore/x/operator/types"
 	tmprotocrypto "github.com/cometbft/cometbft/proto/tendermint/crypto"
 	"github.com/cosmos/cosmos-sdk/store/prefix"
@@ -56,7 +56,7 @@ func (k *Keeper) QueryOperatorConsKeyForChainID(
 	if err != nil {
 		return nil, err
 	}
-	chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(req.Chain)
+	chainIDWithoutRevision := utils.ChainIDWithoutRevision(req.Chain)
 	found, key, err := k.GetOperatorConsKeyForChainID(
 		ctx, addr, chainIDWithoutRevision,
 	)
@@ -83,7 +83,7 @@ func (k Keeper) QueryOperatorConsAddressForChainID(
 	if err != nil {
 		return nil, err
 	}
-	chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(req.Chain)
+	chainIDWithoutRevision := utils.ChainIDWithoutRevision(req.Chain)
 	found, wrappedKey, err := k.GetOperatorConsKeyForChainID(
 		ctx, addr, chainIDWithoutRevision,
 	)
@@ -107,7 +107,7 @@ func (k Keeper) QueryAllOperatorConsKeysByChainID(
 ) (*types.QueryAllOperatorConsKeysByChainIDResponse, error) {
 	ctx := sdk.UnwrapSDKContext(goCtx)
 	res := make([]*types.OperatorConsKeyPair, 0)
-	chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(req.Chain)
+	chainIDWithoutRevision := utils.ChainIDWithoutRevision(req.Chain)
 	chainPrefix := types.ChainIDAndAddrKey(
 		types.BytePrefixForChainIDAndOperatorToConsKey,
 		chainIDWithoutRevision, nil,
@@ -144,7 +144,7 @@ func (k Keeper) QueryAllOperatorConsAddrsByChainID(
 ) (*types.QueryAllOperatorConsAddrsByChainIDResponse, error) {
 	ctx := sdk.UnwrapSDKContext(goCtx)
 	res := make([]*types.OperatorConsAddrPair, 0)
-	chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(req.Chain)
+	chainIDWithoutRevision := utils.ChainIDWithoutRevision(req.Chain)
 	chainPrefix := types.ChainIDAndAddrKey(
 		types.BytePrefixForChainIDAndOperatorToConsKey,
 		chainIDWithoutRevision, nil,
@@ -203,7 +203,7 @@ func (k *Keeper) QueryOperatorSlashInfo(goCtx context.Context, req *types.QueryO
 	ctx := sdk.UnwrapSDKContext(goCtx)
 	res := make([]*types.OperatorSlashInfoByID, 0)
 
-	slashPrefix := types.AppendMany(types.KeyPrefixOperatorSlashInfo, assetstype.GetJoinedStoreKeyForPrefix(req.OperatorAddr, req.AvsAddress))
+	slashPrefix := utils.AppendMany(types.KeyPrefixOperatorSlashInfo, assetstype.GetJoinedStoreKeyForPrefix(req.OperatorAddr, req.AvsAddress))
 	store := prefix.NewStore(ctx.KVStore(k.storeKey), slashPrefix)
 	pageRes, err := query.Paginate(store, req.Pagination, func(key []byte, value []byte) error {
 		ret := &types.OperatorSlashInfo{}
diff --git a/x/operator/keeper/operator.go b/x/operator/keeper/operator.go
index 86a9975ad..2dddf9732 100644
--- a/x/operator/keeper/operator.go
+++ b/x/operator/keeper/operator.go
@@ -196,6 +196,20 @@ func (k *Keeper) GetOptedInAVSForOperator(ctx sdk.Context, operatorAddr string)
 	return avsList, nil
 }
 
+func (k Keeper) GetChainIDsForOperator(ctx sdk.Context, operatorAddr string) ([]string, error) {
+	addrs, err := k.GetOptedInAVSForOperator(ctx, operatorAddr)
+	if err != nil {
+		return nil, err
+	}
+	chainIDs := make([]string, 0, len(addrs))
+	for _, addr := range addrs {
+		if chainID, found := k.avsKeeper.GetChainIDByAVSAddr(ctx, addr); found {
+			chainIDs = append(chainIDs, chainID)
+		}
+	}
+	return chainIDs, nil
+}
+
 func (k *Keeper) SetAllOptedInfo(ctx sdk.Context, optedStates []operatortypes.OptedState) error {
 	store := prefix.NewStore(ctx.KVStore(k.storeKey), operatortypes.KeyPrefixOperatorOptedAVSInfo)
 	for i := range optedStates {
diff --git a/x/operator/keeper/opt.go b/x/operator/keeper/opt.go
index 9128a9ad6..21e1f7c6c 100644
--- a/x/operator/keeper/opt.go
+++ b/x/operator/keeper/opt.go
@@ -113,12 +113,12 @@ func (k *Keeper) OptOut(ctx sdk.Context, operatorAddress sdk.AccAddress, avsAddr
 		return delegationtypes.ErrOperatorIsFrozen
 	}
 	// check if it is the chain-type AVS
-	chainIDWithoutRevision, isChainAvs := k.avsKeeper.GetChainIDByAVSAddr(ctx, avsAddr)
+	chainID, isChainAvs := k.avsKeeper.GetChainIDByAVSAddr(ctx, avsAddr)
 	// set up the deferred function to remove key and write cache
 	defer func() {
 		if err == nil && isChainAvs {
 			// store.Delete... doesn't fail
-			k.InitiateOperatorKeyRemovalForChainID(ctx, operatorAddress, chainIDWithoutRevision)
+			k.InitiateOperatorKeyRemovalForChainID(ctx, operatorAddress, chainID)
 		}
 	}()
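The new GetChainIDsForOperator method in x/operator/keeper/operator.go maps an operator's opted-in AVS addresses back to chain IDs, silently skipping any AVS address for which GetChainIDByAVSAddr reports no chain (i.e. a non-chain AVS). The following is a rough, self-contained sketch of the same filtering logic, with plain maps standing in for the keeper and its store; every name and sample value below is hypothetical, not taken from the repository.

package main

import "fmt"

// operatorKeeper stands in for the real keeper's two lookups: the opted-in AVS list
// and the AVS-address-to-chainID mapping.
type operatorKeeper struct {
    optedInAVS map[string][]string // operator -> opted-in AVS addresses (illustrative data)
    avsToChain map[string]string   // AVS address -> chainID, only for chain-type AVSs
}

// getChainIDsForOperator mirrors the new method: resolve the operator's AVS addresses
// and keep only the ones that correspond to a chain-type AVS.
func (k operatorKeeper) getChainIDsForOperator(operator string) []string {
    addrs := k.optedInAVS[operator]
    chainIDs := make([]string, 0, len(addrs))
    for _, addr := range addrs {
        if chainID, found := k.avsToChain[addr]; found {
            chainIDs = append(chainIDs, chainID)
        }
    }
    return chainIDs
}

func main() {
    k := operatorKeeper{
        optedInAVS: map[string][]string{"exo1operator": {"0xAvsA", "0xAvsB"}},
        avsToChain: map[string]string{"0xAvsA": "appchain_9000"},
    }
    // 0xAvsB has no registered chain ID, so only appchain_9000 is returned.
    fmt.Println(k.getChainIDsForOperator("exo1operator")) // [appchain_9000]
}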
diff --git a/x/operator/keeper/slash.go b/x/operator/keeper/slash.go
index 1f1c6a58d..19c46d4ae 100644
--- a/x/operator/keeper/slash.go
+++ b/x/operator/keeper/slash.go
@@ -2,16 +2,16 @@ package keeper
 
 import (
 	"strings"
+	"time"
 
-	"github.com/ExocoreNetwork/exocore/utils"
 	stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types"
 	"github.com/ethereum/go-ethereum/common/hexutil"
 
 	errorsmod "cosmossdk.io/errors"
 	sdkmath "cosmossdk.io/math"
+	"github.com/ExocoreNetwork/exocore/utils"
 	assetstype "github.com/ExocoreNetwork/exocore/x/assets/types"
-	avstypes "github.com/ExocoreNetwork/exocore/x/avs/types"
 	delegationtype "github.com/ExocoreNetwork/exocore/x/delegation/types"
 	"github.com/ExocoreNetwork/exocore/x/operator/types"
 	sdk "github.com/cosmos/cosmos-sdk/types"
@@ -186,7 +186,8 @@ func (k Keeper) SlashWithInfractionReason(
 	ctx sdk.Context, addr sdk.AccAddress, infractionHeight, power int64,
 	slashFactor sdk.Dec, infraction stakingtypes.Infraction,
 ) sdkmath.Int {
-	chainID := avstypes.ChainIDWithoutRevision(ctx.ChainID())
+	// for x/dogfood, we use the chainIDWithoutRevision as the chainID
+	chainID := utils.ChainIDWithoutRevision(ctx.ChainID())
 	isAvs, avsAddr := k.avsKeeper.IsAVSByChainID(ctx, chainID)
 	if !isAvs {
 		k.Logger(ctx).Error("the chainID is not supported by AVS", "chainID", chainID)
@@ -265,3 +266,23 @@ func (k Keeper) Jail(ctx sdk.Context, consAddr sdk.ConsAddress, chainID string)
 func (k Keeper) Unjail(ctx sdk.Context, consAddr sdk.ConsAddress, chainID string) {
 	k.SetJailedState(ctx, consAddr, chainID, false)
 }
+
+// ApplySlashForHeight is a function used by x/appchain/coordinator to slash an operator
+// based on the historical power and the current assets state. The slashing is made for the
+// provided avsAddress.
+func (k Keeper) ApplySlashForHeight(
+	ctx sdk.Context, operatorAccAddress sdk.AccAddress, avsAddress string,
+	height uint64, fraction sdk.Dec, infraction stakingtypes.Infraction,
+	jailDuration time.Duration,
+) error {
+	k.Logger(ctx).Info(
+		"ApplySlashForHeight",
+		"operatorAccAddress", operatorAccAddress,
+		"avsAddress", avsAddress,
+		"height", height,
+		"fraction", fraction,
+		"infraction", infraction,
+		"jailDuration", jailDuration,
+	)
+	return nil
+}
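As added here, ApplySlashForHeight is only a logging stub that returns nil; its doc comment says it is meant to let x/appchain/coordinator slash against the historical voting power at a given height. The snippet below is purely an assumption about the arithmetic such an implementation would eventually need, using the conventional Cosmos SDK conversion between voting power and tokens; nothing in it is taken from this patch, and the helper name is hypothetical.

package main

import (
    "fmt"

    sdkmath "cosmossdk.io/math"
    sdk "github.com/cosmos/cosmos-sdk/types"
)

// slashAmountForHistoricalPower sketches the conversion an ApplySlashForHeight
// implementation would presumably perform: turn the validator's historical voting
// power back into tokens and apply the slash fraction.
func slashAmountForHistoricalPower(power int64, fraction sdk.Dec) sdkmath.Int {
    tokens := sdk.TokensFromConsensusPower(power, sdk.DefaultPowerReduction)
    return fraction.MulInt(tokens).TruncateInt()
}

func main() {
    fraction := sdk.NewDecWithPrec(5, 2) // 5%
    // 10 units of power * 1e6 (default power reduction) = 10_000_000 tokens; 5% of that is 500_000.
    fmt.Println(slashAmountForHistoricalPower(10, fraction))
}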
diff --git a/x/operator/keeper/slash_test.go b/x/operator/keeper/slash_test.go
index afef3db5b..02f3dc386 100644
--- a/x/operator/keeper/slash_test.go
+++ b/x/operator/keeper/slash_test.go
@@ -4,6 +4,7 @@ import (
 	"time"
 
 	sdkmath "cosmossdk.io/math"
+	"github.com/ExocoreNetwork/exocore/utils"
 	avstypes "github.com/ExocoreNetwork/exocore/x/avs/types"
 	"github.com/ExocoreNetwork/exocore/x/operator/keeper"
 	"github.com/ExocoreNetwork/exocore/x/operator/types"
@@ -25,7 +26,7 @@ func (suite *OperatorTestSuite) TestSlashWithInfractionReason() {
 	suite.NoError(err)
 
 	// opt into the AVS
-	avsAddr := avstypes.GenerateAVSAddr(avstypes.ChainIDWithoutRevision(suite.Ctx.ChainID()))
+	avsAddr := avstypes.GenerateAVSAddr(utils.ChainIDWithoutRevision(suite.Ctx.ChainID()))
 	err = suite.App.OperatorKeeper.OptIn(suite.Ctx, suite.operatorAddr, avsAddr)
 	suite.NoError(err)
 	// call the EndBlock to update the voting power
diff --git a/x/operator/keeper/usd_value_test.go b/x/operator/keeper/usd_value_test.go
index 116f3c6b4..e396ec338 100644
--- a/x/operator/keeper/usd_value_test.go
+++ b/x/operator/keeper/usd_value_test.go
@@ -4,6 +4,7 @@ import (
 	"time"
 
 	sdkmath "cosmossdk.io/math"
+	"github.com/ExocoreNetwork/exocore/utils"
 	assetstype "github.com/ExocoreNetwork/exocore/x/assets/types"
 	avstypes "github.com/ExocoreNetwork/exocore/x/avs/types"
 	operatorKeeper "github.com/ExocoreNetwork/exocore/x/operator/keeper"
@@ -119,8 +120,8 @@ func (suite *OperatorTestSuite) TestVotingPowerForDogFood() {
 	addPower := 1
 	addUSDValue := sdkmath.LegacyNewDec(1)
 
-	chainIDWithoutRevision := avstypes.ChainIDWithoutRevision(suite.Ctx.ChainID())
-	avsAddress := avstypes.GenerateAVSAddr(avstypes.ChainIDWithoutRevision(suite.Ctx.ChainID()))
+	chainIDWithoutRevision := utils.ChainIDWithoutRevision(suite.Ctx.ChainID())
+	avsAddress := avstypes.GenerateAVSAddr(chainIDWithoutRevision)
 	// CommitAfter causes the epoch hook to be triggered, and results in writing
 	// of the AVS usd value to the store.
 	suite.CommitAfter(time.Hour*24 + time.Nanosecond)
diff --git a/x/operator/types/keys.go b/x/operator/types/keys.go
index 1033799af..216bf960b 100644
--- a/x/operator/types/keys.go
+++ b/x/operator/types/keys.go
@@ -3,12 +3,12 @@ package types
 import (
 	"math"
 
+	"github.com/ExocoreNetwork/exocore/utils"
+	sdk "github.com/cosmos/cosmos-sdk/types"
 	"golang.org/x/xerrors"
 
 	authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
 	"github.com/ethereum/go-ethereum/common"
-
-	sdk "github.com/cosmos/cosmos-sdk/types"
 )
 
 // constants
@@ -104,8 +104,8 @@ func init() {
 }
 
 func AddrAndChainIDKey(prefix byte, addr sdk.AccAddress, chainID string) []byte {
-	partialKey := ChainIDWithLenKey(chainID)
-	return AppendMany(
+	partialKey := utils.ChainIDWithLenKey(chainID)
+	return utils.AppendMany(
 		// Append the prefix
 		[]byte{prefix},
 		// Append the addr bytes first so we can iterate over all chain ids
@@ -117,8 +117,8 @@ func AddrAndChainIDKey(prefix byte, addr sdk.AccAddress, chainID string) []byte
 }
 
 func ChainIDAndAddrKey(prefix byte, chainID string, addr sdk.AccAddress) []byte {
-	partialKey := ChainIDWithLenKey(chainID)
-	return AppendMany(
+	partialKey := utils.ChainIDWithLenKey(chainID)
+	return utils.AppendMany(
 		// Append the prefix
 		[]byte{prefix},
 		// Append the partialKey so that we can look for any operator keys
@@ -198,17 +198,21 @@ func KeyForChainIDAndOperatorToConsKey(chainID string, addr sdk.AccAddress) []by
 }
 
 func KeyForChainIDAndConsKeyToOperator(chainID string, addr sdk.ConsAddress) []byte {
-	return AppendMany(
+	return utils.AppendMany(
 		[]byte{BytePrefixForChainIDAndConsKeyToOperator},
-		ChainIDWithLenKey(chainID),
+		utils.ChainIDWithLenKey(chainID),
 		addr,
 	)
 }
 
 func KeyForOperatorKeyRemovalForChainID(addr sdk.AccAddress, chainID string) []byte {
-	return AppendMany(
-		[]byte{BytePrefixForOperatorKeyRemovalForChainID}, addr,
-		ChainIDWithLenKey(chainID),
+	return utils.AppendMany(
+		[]byte{BytePrefixForOperatorKeyRemovalForChainID},
+		addr,
+		// TODO: it may be possible to just use the chainID here without the length.
+		// This is because the chainID is at the end of the key and we can just iterate
+		// over all keys with the same operator address.
+		utils.ChainIDWithLenKey(chainID),
 	)
 }
diff --git a/x/operator/types/utils.go b/x/operator/types/utils.go
deleted file mode 100644
index 6ec7351a9..000000000
--- a/x/operator/types/utils.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package types
-
-import (
-	sdk "github.com/cosmos/cosmos-sdk/types"
-)
-
-// AppendMany appends a variable number of byte slices together
-func AppendMany(byteses ...[]byte) (out []byte) {
-	for _, bytes := range byteses {
-		out = append(out, bytes...)
-	}
-	return out
-}
-
-// ChainIDWithLenKey returns the key with the following format:
-// bytePrefix | len(chainId) | chainId
-// This is similar to Solidity's ABI encoding.
-func ChainIDWithLenKey(chainID string) []byte {
-	chainIDL := len(chainID)
-	return AppendMany(
-		// Append the chainID length
-		// #nosec G701
-		sdk.Uint64ToBigEndian(uint64(chainIDL)),
-		// Append the chainID
-		[]byte(chainID),
-	)
-}
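Deleting x/operator/types/utils.go does not remove any functionality: the keys.go hunks above now call utils.AppendMany and utils.ChainIDWithLenKey, so both helpers continue to exist in the shared utils package. For reference, the length-prefixed layout documented by the removed ChainIDWithLenKey (an 8-byte big-endian length followed by the raw chainID bytes, so a chain ID embedded in the middle of a composite key cannot be confused with the bytes that follow it; the new TODO notes the length is arguably redundant when the chain ID is the final component) can be reproduced in a few lines. This standalone sketch mirrors the deleted code rather than the utils package itself.

package main

import (
    "encoding/binary"
    "fmt"
)

// chainIDWithLenKey reproduces the encoding of the removed helper:
// len(chainID) as an 8-byte big-endian integer, then the raw chainID bytes.
func chainIDWithLenKey(chainID string) []byte {
    out := make([]byte, 8, 8+len(chainID))
    binary.BigEndian.PutUint64(out, uint64(len(chainID)))
    return append(out, chainID...)
}

func main() {
    key := chainIDWithLenKey("exocoretestnet_233")
    // prints 0000000000000012 followed by the ASCII bytes of the chain ID (length 18 = 0x12)
    fmt.Printf("%x\n", key)
}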