From 29f6f5f33ad2636c06b2c56d61376806d5f7073d Mon Sep 17 00:00:00 2001 From: sh-cha Date: Tue, 24 Dec 2024 13:43:44 +0900 Subject: [PATCH] celestia batch reconstruction test --- e2e/batch_reconstruction_test.go | 143 +++++++++++++++++------------ e2e/da_chain.go | 152 +++++++++++++++++++++++++++++++ e2e/helper.go | 60 +++++++----- e2e/l1_chain.go | 49 +--------- 4 files changed, 277 insertions(+), 127 deletions(-) create mode 100644 e2e/da_chain.go diff --git a/e2e/batch_reconstruction_test.go b/e2e/batch_reconstruction_test.go index f4c1fe7..03454d0 100644 --- a/e2e/batch_reconstruction_test.go +++ b/e2e/batch_reconstruction_test.go @@ -54,11 +54,6 @@ func TestBatchReconstructionTest(t *testing.T) { NumFullNodes: 0, } - daChainConfig := &DAChainConfig{ - ChainConfig: *l1ChainConfig, - ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, - } - bridgeConfig := &BridgeConfig{ SubmissionInterval: "5s", FinalizationPeriod: "10s", @@ -67,76 +62,112 @@ func TestBatchReconstructionTest(t *testing.T) { Metadata: "", } - ctx := context.Background() - - op := SetupTest(t, ctx, BotExecutor, l1ChainConfig, l2ChainConfig, daChainConfig, bridgeConfig) - - err := testutil.WaitForBlocks(ctx, 20, op.Initia, op.Minitia) - require.NoError(t, err) + cases := []struct { + name string + daChainConfig DAChainConfig + }{ + { + name: "celestia", + daChainConfig: DAChainConfig{ + ChainConfig: ChainConfig{ + ChainID: "celestia", + Image: ibc.DockerImage{Repository: "ghcr.io/celestiaorg/celestia-app", Version: "v3.2.0", UIDGID: "10001:10001"}, + Bin: "celestia-appd", + Bech32Prefix: "celestia", + Denom: "utia", + Gas: "auto", + GasPrices: "0.25utia", + GasAdjustment: 1.5, + TrustingPeriod: "168h", + NumValidators: 1, + NumFullNodes: 0, + }, + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_CELESTIA, + }, + }, + { + name: "initia", + daChainConfig: DAChainConfig{ + ChainConfig: *l1ChainConfig, + ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_INITIA, + }, + }, + } - batches, err := op.Initia.QueryRecordBatch(ctx) - require.NoError(t, err) + for _, tc := range cases { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() - var header executortypes.BatchDataHeader - var chunks []executortypes.BatchDataChunk - var blockBytes []byte + op := SetupTest(t, ctx, BotExecutor, l1ChainConfig, l2ChainConfig, &tc.daChainConfig, bridgeConfig) - for _, batch := range batches { - if batch[0] == byte(executortypes.BatchDataTypeHeader) { - header, err = executortypes.UnmarshalBatchDataHeader(batch) + err := testutil.WaitForBlocks(ctx, 20, op.Initia, op.Minitia) require.NoError(t, err) - } else { - chunk, err := executortypes.UnmarshalBatchDataChunk(batch) + + batches, err := op.DA.QueryBatchData(ctx) require.NoError(t, err) - chunks = append(chunks, chunk) - require.Equal(t, header.Start, chunk.Start) - require.Equal(t, header.End, chunk.End) - require.Equal(t, len(header.Checksums), int(chunk.Length)) - } + var header executortypes.BatchDataHeader + var chunks []executortypes.BatchDataChunk + var blockBytes []byte + + for _, batch := range batches { + if batch[0] == byte(executortypes.BatchDataTypeHeader) { + header, err = executortypes.UnmarshalBatchDataHeader(batch) + require.NoError(t, err) + } else { + chunk, err := executortypes.UnmarshalBatchDataChunk(batch) + require.NoError(t, err) + chunks = append(chunks, chunk) + + require.Equal(t, header.Start, chunk.Start) + require.Equal(t, header.End, chunk.End) + require.Equal(t, len(header.Checksums), int(chunk.Length)) + } - if len(header.Checksums) == len(chunks) { - for i, 
chunk := range chunks { - require.Equal(t, i, int(chunk.Index)) + if len(header.Checksums) == len(chunks) { + for i, chunk := range chunks { + require.Equal(t, i, int(chunk.Index)) - checksum := executortypes.GetChecksumFromChunk(chunk.ChunkData) - require.Equal(t, header.Checksums[i], checksum[:]) + checksum := executortypes.GetChecksumFromChunk(chunk.ChunkData) + require.Equal(t, header.Checksums[i], checksum[:]) - blockBytes = append(blockBytes, chunk.ChunkData...) - } + blockBytes = append(blockBytes, chunk.ChunkData...) + } - blocks, err := decompressBatch(blockBytes) - require.NoError(t, err) + blocks, err := decompressBatch(blockBytes) + require.NoError(t, err) - for _, blockBz := range blocks[:len(blocks)-1] { - block, err := unmarshalBlock(blockBz) - require.NoError(t, err) - require.NotNil(t, block) + for _, blockBz := range blocks[:len(blocks)-1] { + block, err := unmarshalBlock(blockBz) + require.NoError(t, err) + require.NotNil(t, block) - err = fillOracleData(ctx, block, op.Initia) - require.NoError(t, err) + err = fillOracleData(ctx, block, op.Initia) + require.NoError(t, err) - pbb, err := block.ToProto() - require.NoError(t, err) + pbb, err := block.ToProto() + require.NoError(t, err) - blockBytes, err := pbb.Marshal() - require.NoError(t, err) + blockBytes, err := pbb.Marshal() + require.NoError(t, err) - l2Block, err := op.Minitia.GetFullNode().Client.Block(ctx, &block.Height) - require.NoError(t, err) + l2Block, err := op.Minitia.GetFullNode().Client.Block(ctx, &block.Height) + require.NoError(t, err) - expectedBlock, err := l2Block.Block.ToProto() - require.NoError(t, err) + expectedBlock, err := l2Block.Block.ToProto() + require.NoError(t, err) - expectedBlockBytes, err := expectedBlock.Marshal() - require.NoError(t, err) + expectedBlockBytes, err := expectedBlock.Marshal() + require.NoError(t, err) - require.Equal(t, expectedBlockBytes, blockBytes) - } + require.Equal(t, expectedBlockBytes, blockBytes) + } - chunks = make([]executortypes.BatchDataChunk, 0) - blockBytes = make([]byte, 0) - } + chunks = make([]executortypes.BatchDataChunk, 0) + blockBytes = make([]byte, 0) + } + } + }) } } diff --git a/e2e/da_chain.go b/e2e/da_chain.go new file mode 100644 index 0000000..61ad353 --- /dev/null +++ b/e2e/da_chain.go @@ -0,0 +1,152 @@ +package e2e + +import ( + "context" + "fmt" + + ophosttypes "github.com/initia-labs/OPinit/x/ophost/types" + "github.com/strangelove-ventures/interchaintest/v8/chain/cosmos" + "github.com/strangelove-ventures/interchaintest/v8/ibc" + "go.uber.org/zap" + + txv1beta1 "cosmossdk.io/api/cosmos/tx/v1beta1" + comettypes "github.com/cometbft/cometbft/types" + "google.golang.org/protobuf/proto" + + ophostv1 "github.com/initia-labs/OPinit/api/opinit/ophost/v1" + + opcelestia "github.com/initia-labs/opinit-bots/types/celestia" +) + +type DAChainNode struct { + *cosmos.ChainNode + log *zap.Logger +} + +func NewDAChainNode(log *zap.Logger, chainNode *cosmos.ChainNode) *DAChainNode { + return &DAChainNode{ + ChainNode: chainNode, + log: log, + } +} + +type DAChain struct { + *cosmos.CosmosChain + + ChainType ophosttypes.BatchInfo_ChainType + BatchSubmitter ibc.Wallet + + log *zap.Logger +} + +func NewDAChain(log *zap.Logger, cosmosChain *cosmos.CosmosChain, chainType ophosttypes.BatchInfo_ChainType, batchSubmitter ibc.Wallet) *DAChain { + return &DAChain{ + log: log, + CosmosChain: cosmosChain, + ChainType: chainType, + BatchSubmitter: batchSubmitter, + } +} + +func (da *DAChain) GetNode() *DAChainNode { + return NewDAChainNode(da.log, 
da.CosmosChain.GetNode()) +} + +func (da *DAChain) GetFullNode() *DAChainNode { + return NewDAChainNode(da.log, da.CosmosChain.GetFullNode()) +} + +func (da *DAChain) QueryBatchData(ctx context.Context) ([][]byte, error) { + switch da.ChainType { + case ophosttypes.BatchInfo_CHAIN_TYPE_INITIA: + return da.QueryInitiaBatchData(ctx) + case ophosttypes.BatchInfo_CHAIN_TYPE_CELESTIA: + return da.QueryCelestiaBatchData(ctx) + } + return nil, fmt.Errorf("unsupported chain type") +} + +func (da *DAChain) QueryInitiaBatchData(ctx context.Context) ([][]byte, error) { + if da.ChainType != ophosttypes.BatchInfo_CHAIN_TYPE_INITIA { + return nil, fmt.Errorf("unmatched chain type") + } + + data := [][]byte{} + page := 1 + perPage := 100 + + for { + txsResult, err := da.GetFullNode().Client.TxSearch(ctx, "message.action='/opinit.ophost.v1.MsgRecordBatch'", false, &page, &perPage, "asc") + if err != nil { + return nil, err + } + + for _, tx := range txsResult.Txs { + var raw txv1beta1.TxRaw + if err := proto.Unmarshal(tx.Tx, &raw); err != nil { + return nil, err + } + + var body txv1beta1.TxBody + if err := proto.Unmarshal(raw.BodyBytes, &body); err != nil { + return nil, err + } + + if len(body.Messages) == 0 { + continue + } + + recordBatch := new(ophostv1.MsgRecordBatch) + if err := body.Messages[0].UnmarshalTo(recordBatch); err != nil { + return nil, err + } + data = append(data, recordBatch.BatchBytes) + } + + if txsResult.TotalCount <= page*100 { + break + } + } + return data, nil +} + +func (da *DAChain) QueryCelestiaBatchData(ctx context.Context) ([][]byte, error) { + if da.ChainType != ophosttypes.BatchInfo_CHAIN_TYPE_CELESTIA { + return nil, fmt.Errorf("unmatched chain type") + } + + data := [][]byte{} + page := 1 + perPage := 100 + + var lastBlock *comettypes.Block + + for { + txsResult, err := da.GetFullNode().Client.TxSearch(ctx, "message.action='/celestia.blob.v1.MsgPayForBlobs'", false, &page, &perPage, "asc") + if err != nil { + return nil, err + } + + for _, tx := range txsResult.Txs { + if lastBlock == nil || tx.Height != lastBlock.Height { + blockResult, err := da.GetFullNode().Client.Block(ctx, &tx.Height) + if err != nil { + return nil, err + } + lastBlock = blockResult.Block + } + + var blobTx opcelestia.BlobTx + err = blobTx.Unmarshal(lastBlock.Txs[tx.Index]) + if err != nil { + return nil, err + } + data = append(data, blobTx.Blobs[0].Data) + } + + if txsResult.TotalCount <= page*100 { + break + } + } + return data, nil +} diff --git a/e2e/helper.go b/e2e/helper.go index 1aba0b9..7541472 100644 --- a/e2e/helper.go +++ b/e2e/helper.go @@ -95,10 +95,9 @@ type BridgeConfig struct { type OPTestHelper struct { Logger *zap.Logger - Initia *L1Chain - Minitia *L2Chain - DA *cosmos.CosmosChain - DAChainType ophosttypes.BatchInfo_ChainType + Initia *L1Chain + Minitia *L2Chain + DA *DAChain OP *OPBot Relayer ibc.Relayer @@ -348,14 +347,28 @@ func SetupTest( Images: []ibc.DockerImage{ daChainConfig.Image, }, - Bin: daChainConfig.Bin, - Bech32Prefix: daChainConfig.Bech32Prefix, - Denom: daChainConfig.Denom, - Gas: daChainConfig.Gas, - GasPrices: daChainConfig.GasPrices, - GasAdjustment: daChainConfig.GasAdjustment, - TrustingPeriod: daChainConfig.TrustingPeriod, - NoHostMount: false, + Bin: daChainConfig.Bin, + Bech32Prefix: daChainConfig.Bech32Prefix, + Denom: daChainConfig.Denom, + Gas: daChainConfig.Gas, + GasPrices: daChainConfig.GasPrices, + GasAdjustment: daChainConfig.GasAdjustment, + TrustingPeriod: daChainConfig.TrustingPeriod, + NoHostMount: false, + AdditionalStartArgs: 
[]string{"--force-no-bbr"}, + PreGenesis: func(ch ibc.Chain) error { + daChain := ch.(*cosmos.CosmosChain) + + ctx := context.Background() + c := make(testutil.Toml) + txIndex := make(testutil.Toml) + txIndex["indexer"] = "kv" + c["tx_index"] = txIndex + + err = testutil.ModifyTomlConfigFile(ctx, logger, client, t.Name(), daChain.Validators[0].VolumeName, "config/config.toml", c) + require.NoError(t, err) + return nil + }, }, NumValidators: &daChainConfig.NumValidators, NumFullNodes: &daChainConfig.NumFullNodes, @@ -367,10 +380,6 @@ func SetupTest( require.NoError(t, err) initia, minitia := chains[0].(*cosmos.CosmosChain), chains[1].(*cosmos.CosmosChain) - da := initia - if len(chains) == 3 { - da = chains[2].(*cosmos.CosmosChain) - } // relayer setup @@ -387,6 +396,12 @@ func SetupTest( Path: ibcPath, }) + da := initia + if len(chains) == 3 { + da = chains[2].(*cosmos.CosmosChain) + ic.AddChain(da) + } + icBuildOptions := interchaintest.InterchainBuildOptions{ TestName: t.Name(), Client: client, @@ -419,9 +434,9 @@ func SetupTest( }) require.NoError(t, err) - err = initia.SendFunds(ctx, interchaintest.FaucetAccountKeyName, ibc.WalletAmount{ + err = da.SendFunds(ctx, interchaintest.FaucetAccountKeyName, ibc.WalletAmount{ Address: batchSubmitter.FormattedAddress(), - Denom: initia.Config().Denom, + Denom: da.Config().Denom, Amount: math.NewInt(100_000_000_000), }) require.NoError(t, err) @@ -445,10 +460,9 @@ func SetupTest( helper := OPTestHelper{ logger, - NewL1Chain(logger, initia, outputSubmitter, batchSubmitter, challenger), + NewL1Chain(logger, initia, outputSubmitter, challenger), NewL2Chain(logger, minitia, bridgeExecutor, oracleBridgeExecutor, l2Validator), - da, - daChainConfig.ChainType, + NewDAChain(logger, da, daChainConfig.ChainType, batchSubmitter), op, relayer, @@ -491,8 +505,8 @@ func (op OPTestHelper) BridgeConfig() ophostcli.BridgeCliConfig { Challenger: op.Initia.Challenger.FormattedAddress(), Proposer: op.Initia.OutputSubmitter.FormattedAddress(), BatchInfo: ophosttypes.BatchInfo{ - Submitter: op.Initia.BatchSubmitter.FormattedAddress(), - ChainType: op.DAChainType, + Submitter: op.DA.BatchSubmitter.FormattedAddress(), + ChainType: op.DA.ChainType, }, SubmissionInterval: op.bridgeConfig.SubmissionInterval, FinalizationPeriod: op.bridgeConfig.FinalizationPeriod, diff --git a/e2e/l1_chain.go b/e2e/l1_chain.go index 970bb5e..a6b21ce 100644 --- a/e2e/l1_chain.go +++ b/e2e/l1_chain.go @@ -14,11 +14,6 @@ import ( "go.uber.org/zap" sdk "github.com/cosmos/cosmos-sdk/types" - - txv1beta1 "cosmossdk.io/api/cosmos/tx/v1beta1" - "google.golang.org/protobuf/proto" - - ophostv1 "github.com/initia-labs/OPinit/api/opinit/ophost/v1" ) type L1ChainNode struct { @@ -37,18 +32,16 @@ type L1Chain struct { *cosmos.CosmosChain OutputSubmitter ibc.Wallet - BatchSubmitter ibc.Wallet Challenger ibc.Wallet log *zap.Logger } -func NewL1Chain(log *zap.Logger, cosmosChain *cosmos.CosmosChain, outputSubmitter ibc.Wallet, batchSubmitter ibc.Wallet, challenger ibc.Wallet) *L1Chain { +func NewL1Chain(log *zap.Logger, cosmosChain *cosmos.CosmosChain, outputSubmitter ibc.Wallet, challenger ibc.Wallet) *L1Chain { return &L1Chain{ log: log, CosmosChain: cosmosChain, OutputSubmitter: outputSubmitter, - BatchSubmitter: batchSubmitter, Challenger: challenger, } } @@ -148,43 +141,3 @@ func (l1 *L1Chain) QueryOutputProposal(ctx context.Context, bridgeId uint64, out func (l1 *L1Chain) QueryLastFinalizedOutput(ctx context.Context, bridgeId uint64) (*ophosttypes.QueryLastFinalizedOutputResponse, error) { return 
ophosttypes.NewQueryClient(l1.GetFullNode().GrpcConn).LastFinalizedOutput(ctx, &ophosttypes.QueryLastFinalizedOutputRequest{BridgeId: bridgeId}) } - -func (l1 *L1Chain) QueryRecordBatch(ctx context.Context) ([][]byte, error) { - data := [][]byte{} - page := 1 - perPage := 100 - - for { - txsResult, err := l1.GetFullNode().Client.TxSearch(ctx, "message.action='/opinit.ophost.v1.MsgRecordBatch'", false, &page, &perPage, "asc") - if err != nil { - return nil, err - } - - for _, tx := range txsResult.Txs { - var raw txv1beta1.TxRaw - if err := proto.Unmarshal(tx.Tx, &raw); err != nil { - return nil, err - } - - var body txv1beta1.TxBody - if err := proto.Unmarshal(raw.BodyBytes, &body); err != nil { - return nil, err - } - - if len(body.Messages) == 0 { - continue - } - - recordBatch := new(ophostv1.MsgRecordBatch) - if err := body.Messages[0].UnmarshalTo(recordBatch); err != nil { - return nil, err - } - data = append(data, recordBatch.BatchBytes) - } - - if txsResult.TotalCount <= page*100 { - break - } - } - return data, nil -}
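-- 
For reference, below is a minimal standalone sketch of the Celestia read path that QueryCelestiaBatchData implements above: search for MsgPayForBlobs results, re-read the block at each result's height, and unwrap the BlobTx stored in the block data to recover the raw batch bytes. The helper name (collectBlobData) and the bare CometBFT HTTP client wiring are illustrative only and not part of this patch; the in-tree helper goes through GetFullNode().Client, caches the last fetched block per height, and assumes the results fit in a single TxSearch page (which holds for these e2e runs), whereas the sketch re-reads the block per transaction and also advances the page counter.

package e2e

import (
	"context"

	rpchttp "github.com/cometbft/cometbft/rpc/client/http"

	opcelestia "github.com/initia-labs/opinit-bots/types/celestia"
)

// collectBlobData walks MsgPayForBlobs transactions page by page and returns
// the first blob payload of each matching transaction.
func collectBlobData(ctx context.Context, client *rpchttp.HTTP) ([][]byte, error) {
	data := [][]byte{}
	page, perPage := 1, 100

	for {
		res, err := client.TxSearch(ctx, "message.action='/celestia.blob.v1.MsgPayForBlobs'", false, &page, &perPage, "asc")
		if err != nil {
			return nil, err
		}

		for _, tx := range res.Txs {
			// The blob payload travels in the BlobTx wrapper kept in block data,
			// so re-read the block at the result's height and decode that entry
			// (mirroring QueryCelestiaBatchData above).
			blockRes, err := client.Block(ctx, &tx.Height)
			if err != nil {
				return nil, err
			}

			var blobTx opcelestia.BlobTx
			if err := blobTx.Unmarshal(blockRes.Block.Txs[tx.Index]); err != nil {
				return nil, err
			}
			data = append(data, blobTx.Blobs[0].Data)
		}

		// Stop after the last page of results; otherwise advance to the next one.
		if res.TotalCount <= page*perPage {
			break
		}
		page++
	}
	return data, nil
}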