celestia batch reconstruction test
sh-cha committed Dec 24, 2024
1 parent cb3dcec commit 29f6f5f
Showing 4 changed files with 277 additions and 127 deletions.
143 changes: 87 additions & 56 deletions e2e/batch_reconstruction_test.go
@@ -54,11 +54,6 @@ func TestBatchReconstructionTest(t *testing.T) {
 		NumFullNodes: 0,
 	}
 
-	daChainConfig := &DAChainConfig{
-		ChainConfig: *l1ChainConfig,
-		ChainType:   ophosttypes.BatchInfo_CHAIN_TYPE_INITIA,
-	}
-
 	bridgeConfig := &BridgeConfig{
 		SubmissionInterval: "5s",
 		FinalizationPeriod: "10s",
@@ -67,76 +62,112 @@ func TestBatchReconstructionTest(t *testing.T) {
 		Metadata: "",
 	}
 
-	ctx := context.Background()
-
-	op := SetupTest(t, ctx, BotExecutor, l1ChainConfig, l2ChainConfig, daChainConfig, bridgeConfig)
-
-	err := testutil.WaitForBlocks(ctx, 20, op.Initia, op.Minitia)
-	require.NoError(t, err)
-
-	batches, err := op.Initia.QueryRecordBatch(ctx)
-	require.NoError(t, err)
-
-	var header executortypes.BatchDataHeader
-	var chunks []executortypes.BatchDataChunk
-	var blockBytes []byte
-
-	for _, batch := range batches {
-		if batch[0] == byte(executortypes.BatchDataTypeHeader) {
-			header, err = executortypes.UnmarshalBatchDataHeader(batch)
-			require.NoError(t, err)
-		} else {
-			chunk, err := executortypes.UnmarshalBatchDataChunk(batch)
-			require.NoError(t, err)
-			chunks = append(chunks, chunk)
-
-			require.Equal(t, header.Start, chunk.Start)
-			require.Equal(t, header.End, chunk.End)
-			require.Equal(t, len(header.Checksums), int(chunk.Length))
-		}
-
-		if len(header.Checksums) == len(chunks) {
-			for i, chunk := range chunks {
-				require.Equal(t, i, int(chunk.Index))
-
-				checksum := executortypes.GetChecksumFromChunk(chunk.ChunkData)
-				require.Equal(t, header.Checksums[i], checksum[:])
-
-				blockBytes = append(blockBytes, chunk.ChunkData...)
-			}
-
-			blocks, err := decompressBatch(blockBytes)
-			require.NoError(t, err)
-
-			for _, blockBz := range blocks[:len(blocks)-1] {
-				block, err := unmarshalBlock(blockBz)
-				require.NoError(t, err)
-				require.NotNil(t, block)
-
-				err = fillOracleData(ctx, block, op.Initia)
-				require.NoError(t, err)
-
-				pbb, err := block.ToProto()
-				require.NoError(t, err)
-
-				blockBytes, err := pbb.Marshal()
-				require.NoError(t, err)
-
-				l2Block, err := op.Minitia.GetFullNode().Client.Block(ctx, &block.Height)
-				require.NoError(t, err)
-
-				expectedBlock, err := l2Block.Block.ToProto()
-				require.NoError(t, err)
-
-				expectedBlockBytes, err := expectedBlock.Marshal()
-				require.NoError(t, err)
-
-				require.Equal(t, expectedBlockBytes, blockBytes)
-			}
-
-			chunks = make([]executortypes.BatchDataChunk, 0)
-			blockBytes = make([]byte, 0)
-		}
-	}
+	cases := []struct {
+		name          string
+		daChainConfig DAChainConfig
+	}{
+		{
+			name: "celestia",
+			daChainConfig: DAChainConfig{
+				ChainConfig: ChainConfig{
+					ChainID:        "celestia",
+					Image:          ibc.DockerImage{Repository: "ghcr.io/celestiaorg/celestia-app", Version: "v3.2.0", UIDGID: "10001:10001"},
+					Bin:            "celestia-appd",
+					Bech32Prefix:   "celestia",
+					Denom:          "utia",
+					Gas:            "auto",
+					GasPrices:      "0.25utia",
+					GasAdjustment:  1.5,
+					TrustingPeriod: "168h",
+					NumValidators:  1,
+					NumFullNodes:   0,
+				},
+				ChainType: ophosttypes.BatchInfo_CHAIN_TYPE_CELESTIA,
+			},
+		},
+		{
+			name: "initia",
+			daChainConfig: DAChainConfig{
+				ChainConfig: *l1ChainConfig,
+				ChainType:   ophosttypes.BatchInfo_CHAIN_TYPE_INITIA,
+			},
+		},
+	}
+
+	for _, tc := range cases {
+		t.Run(tc.name, func(t *testing.T) {
+			ctx := context.Background()
+
+			op := SetupTest(t, ctx, BotExecutor, l1ChainConfig, l2ChainConfig, &tc.daChainConfig, bridgeConfig)
+
+			err := testutil.WaitForBlocks(ctx, 20, op.Initia, op.Minitia)
+			require.NoError(t, err)
+
+			batches, err := op.DA.QueryBatchData(ctx)
+			require.NoError(t, err)
+
+			var header executortypes.BatchDataHeader
+			var chunks []executortypes.BatchDataChunk
+			var blockBytes []byte
+
+			for _, batch := range batches {
+				if batch[0] == byte(executortypes.BatchDataTypeHeader) {
+					header, err = executortypes.UnmarshalBatchDataHeader(batch)
+					require.NoError(t, err)
+				} else {
+					chunk, err := executortypes.UnmarshalBatchDataChunk(batch)
+					require.NoError(t, err)
+					chunks = append(chunks, chunk)
+
+					require.Equal(t, header.Start, chunk.Start)
+					require.Equal(t, header.End, chunk.End)
+					require.Equal(t, len(header.Checksums), int(chunk.Length))
+				}
+
+				if len(header.Checksums) == len(chunks) {
+					for i, chunk := range chunks {
+						require.Equal(t, i, int(chunk.Index))
+
+						checksum := executortypes.GetChecksumFromChunk(chunk.ChunkData)
+						require.Equal(t, header.Checksums[i], checksum[:])
+
+						blockBytes = append(blockBytes, chunk.ChunkData...)
+					}
+
+					blocks, err := decompressBatch(blockBytes)
+					require.NoError(t, err)
+
+					for _, blockBz := range blocks[:len(blocks)-1] {
+						block, err := unmarshalBlock(blockBz)
+						require.NoError(t, err)
+						require.NotNil(t, block)
+
+						err = fillOracleData(ctx, block, op.Initia)
+						require.NoError(t, err)
+
+						pbb, err := block.ToProto()
+						require.NoError(t, err)
+
+						blockBytes, err := pbb.Marshal()
+						require.NoError(t, err)
+
+						l2Block, err := op.Minitia.GetFullNode().Client.Block(ctx, &block.Height)
+						require.NoError(t, err)
+
+						expectedBlock, err := l2Block.Block.ToProto()
+						require.NoError(t, err)
+
+						expectedBlockBytes, err := expectedBlock.Marshal()
+						require.NoError(t, err)
+
+						require.Equal(t, expectedBlockBytes, blockBytes)
+					}
+
+					chunks = make([]executortypes.BatchDataChunk, 0)
+					blockBytes = make([]byte, 0)
+				}
+			}
+		})
+	}
 }
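With the table-driven layout above, each DA backend runs as its own subtest, so one backend can be exercised in isolation with Go's standard test runner. The package path below is an assumption based on the e2e/ directory shown in this diff, and the interchaintest-based harness generally needs a local Docker daemon to start the chains:

go test -v -timeout 30m -run 'TestBatchReconstructionTest/celestia' ./e2e/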

152 changes: 152 additions & 0 deletions e2e/da_chain.go
@@ -0,0 +1,152 @@
package e2e

import (
	"context"
	"fmt"

	ophosttypes "github.com/initia-labs/OPinit/x/ophost/types"
	"github.com/strangelove-ventures/interchaintest/v8/chain/cosmos"
	"github.com/strangelove-ventures/interchaintest/v8/ibc"
	"go.uber.org/zap"

	txv1beta1 "cosmossdk.io/api/cosmos/tx/v1beta1"
	comettypes "github.com/cometbft/cometbft/types"
	"google.golang.org/protobuf/proto"

	ophostv1 "github.com/initia-labs/OPinit/api/opinit/ophost/v1"

	opcelestia "github.com/initia-labs/opinit-bots/types/celestia"
)

type DAChainNode struct {
	*cosmos.ChainNode
	log *zap.Logger
}

func NewDAChainNode(log *zap.Logger, chainNode *cosmos.ChainNode) *DAChainNode {
	return &DAChainNode{
		ChainNode: chainNode,
		log:       log,
	}
}

type DAChain struct {
	*cosmos.CosmosChain

	ChainType      ophosttypes.BatchInfo_ChainType
	BatchSubmitter ibc.Wallet

	log *zap.Logger
}

func NewDAChain(log *zap.Logger, cosmosChain *cosmos.CosmosChain, chainType ophosttypes.BatchInfo_ChainType, batchSubmitter ibc.Wallet) *DAChain {
	return &DAChain{
		log:            log,
		CosmosChain:    cosmosChain,
		ChainType:      chainType,
		BatchSubmitter: batchSubmitter,
	}
}

func (da *DAChain) GetNode() *DAChainNode {
	return NewDAChainNode(da.log, da.CosmosChain.GetNode())
}

func (da *DAChain) GetFullNode() *DAChainNode {
	return NewDAChainNode(da.log, da.CosmosChain.GetFullNode())
}

func (da *DAChain) QueryBatchData(ctx context.Context) ([][]byte, error) {
	switch da.ChainType {
	case ophosttypes.BatchInfo_CHAIN_TYPE_INITIA:
		return da.QueryInitiaBatchData(ctx)
	case ophosttypes.BatchInfo_CHAIN_TYPE_CELESTIA:
		return da.QueryCelestiaBatchData(ctx)
	}
	return nil, fmt.Errorf("unsupported chain type")
}

func (da *DAChain) QueryInitiaBatchData(ctx context.Context) ([][]byte, error) {
	if da.ChainType != ophosttypes.BatchInfo_CHAIN_TYPE_INITIA {
		return nil, fmt.Errorf("unmatched chain type")
	}

	data := [][]byte{}
	page := 1
	perPage := 100

	for {
		txsResult, err := da.GetFullNode().Client.TxSearch(ctx, "message.action='/opinit.ophost.v1.MsgRecordBatch'", false, &page, &perPage, "asc")
		if err != nil {
			return nil, err
		}

		for _, tx := range txsResult.Txs {
			var raw txv1beta1.TxRaw
			if err := proto.Unmarshal(tx.Tx, &raw); err != nil {
				return nil, err
			}

			var body txv1beta1.TxBody
			if err := proto.Unmarshal(raw.BodyBytes, &body); err != nil {
				return nil, err
			}

			if len(body.Messages) == 0 {
				continue
			}

			recordBatch := new(ophostv1.MsgRecordBatch)
			if err := body.Messages[0].UnmarshalTo(recordBatch); err != nil {
				return nil, err
			}
			data = append(data, recordBatch.BatchBytes)
		}

		// Advance through result pages until every matching tx has been read.
		if txsResult.TotalCount <= page*perPage {
			break
		}
		page++
	}
	return data, nil
}

func (da *DAChain) QueryCelestiaBatchData(ctx context.Context) ([][]byte, error) {
	if da.ChainType != ophosttypes.BatchInfo_CHAIN_TYPE_CELESTIA {
		return nil, fmt.Errorf("unmatched chain type")
	}

	data := [][]byte{}
	page := 1
	perPage := 100

	var lastBlock *comettypes.Block

	for {
		txsResult, err := da.GetFullNode().Client.TxSearch(ctx, "message.action='/celestia.blob.v1.MsgPayForBlobs'", false, &page, &perPage, "asc")
		if err != nil {
			return nil, err
		}

		for _, tx := range txsResult.Txs {
			// Fetch the block containing this tx (cached in lastBlock so txs at the
			// same height reuse one Block query) and decode the raw block tx as a
			// BlobTx to extract the blob payload.
			if lastBlock == nil || tx.Height != lastBlock.Height {
				blockResult, err := da.GetFullNode().Client.Block(ctx, &tx.Height)
				if err != nil {
					return nil, err
				}
				lastBlock = blockResult.Block
			}

			var blobTx opcelestia.BlobTx
			err = blobTx.Unmarshal(lastBlock.Txs[tx.Index])
			if err != nil {
				return nil, err
			}
			data = append(data, blobTx.Blobs[0].Data)
		}

		// Advance through result pages until every matching tx has been read.
		if txsResult.TotalCount <= page*perPage {
			break
		}
		page++
	}
	return data, nil
}
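For reference, a minimal usage sketch of the new type. queryCelestiaBatches is a hypothetical helper, not part of this commit; log, chain, and submitter stand in for the logger, CosmosChain, and batch-submitter wallet the test harness already creates, and the sketch assumes it lives in the same e2e package so the imports above apply:

func queryCelestiaBatches(ctx context.Context, log *zap.Logger, chain *cosmos.CosmosChain, submitter ibc.Wallet) ([][]byte, error) {
	da := NewDAChain(log, chain, ophosttypes.BatchInfo_CHAIN_TYPE_CELESTIA, submitter)
	// Dispatches to QueryCelestiaBatchData for this chain type; each returned element
	// is one raw submission (a BatchDataHeader or a BatchDataChunk, distinguished by
	// its first byte), exactly as TestBatchReconstructionTest consumes them.
	return da.QueryBatchData(ctx)
}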
