add ability to control DA required by blocks produced by the builder #421

Merged
15 changes: 15 additions & 0 deletions core/txpool/legacypool/legacypool.go
@@ -568,9 +568,23 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address]
                }
            }
        }
        if filter.MaxDATxSize != nil && !pool.locals.contains(addr) {
            for i, tx := range txs {
                estimate := types.EstimatedL1SizeScaled(tx.RollupCostData())
                estimate = estimate.Div(estimate, big.NewInt(1e6))
                if estimate.Cmp(filter.MaxDATxSize) > 0 {
                    log.Debug("filtering tx that exceeds max da tx size",
                        "hash", tx.Hash(), "txda", estimate, "dalimit", filter.MaxDATxSize)
                    txs = txs[:i]
                    break
                }
            }
        }
        if len(txs) > 0 {
            lazies := make([]*txpool.LazyTransaction, len(txs))
            for i := 0; i < len(txs); i++ {
                daBytes := types.EstimatedL1SizeScaled(txs[i].RollupCostData())
                daBytes = daBytes.Div(daBytes, big.NewInt(1e6))
                lazies[i] = &txpool.LazyTransaction{
                    Pool: pool,
                    Hash: txs[i].Hash(),
@@ -580,6 +594,7 @@ func (pool *LegacyPool) Pending(filter txpool.PendingFilter) map[common.Address]
                    GasTipCap: uint256.MustFromBig(txs[i].GasTipCap()),
                    Gas: txs[i].Gas(),
                    BlobGas: txs[i].BlobGas(),
                    DABytes: daBytes,
                }
            }
            pending[addr] = lazies
6 changes: 6 additions & 0 deletions core/txpool/subpool.go
@@ -41,6 +41,8 @@ type LazyTransaction struct {

    Gas     uint64 // Amount of gas required by the transaction
    BlobGas uint64 // Amount of blob gas required by the transaction

    DABytes *big.Int // Amount of data availability bytes this transaction may require if this is a rollup
}

// Resolve retrieves the full transaction belonging to a lazy handle if it is still
@@ -83,6 +85,10 @@ type PendingFilter struct {

    OnlyPlainTxs bool // Return only plain EVM transactions (peer-join announces, block space filling)
    OnlyBlobTxs  bool // Return only blob transactions (block blob-space filling)

    // OP stack addition: Maximum l1 data size allowed for an included transaction (for throttling
    // when batcher is backlogged). Ignored if nil.
    MaxDATxSize *big.Int
}

// SubPool represents a specialized transaction pool that lives on its own (e.g.
7 changes: 7 additions & 0 deletions eth/api_miner.go
@@ -56,3 +56,10 @@ func (api *MinerAPI) SetGasLimit(gasLimit hexutil.Uint64) bool {
    api.e.Miner().SetGasCeil(uint64(gasLimit))
    return true
}

// SetMaxDASize sets the maximum data availability size of any tx allowed in a block, and the total max l1 data size of
// the block. 0 means no maximum.
func (api *MinerAPI) SetMaxDASize(maxTxSize hexutil.Big, maxBlockSize hexutil.Big) bool {
    api.e.Miner().SetMaxDASize(maxTxSize.ToInt(), maxBlockSize.ToInt())
    return true
}
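
As a usage note (not part of this diff): once the node exposes the miner API, an operator-side process such as the batcher could toggle the new throttle over JSON-RPC. The sketch below is an assumption about how a caller might drive it with go-ethereum's rpc client; the endpoint URL and the limit values are illustrative, while the method name miner_setMaxDASize and its two hexutil.Big parameters come from the code above.

package main

import (
    "context"
    "log"
    "math/big"

    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/ethereum/go-ethereum/rpc"
)

func main() {
    // Assumed endpoint: a node with the "miner" API namespace enabled.
    client, err := rpc.Dial("http://localhost:8545")
    if err != nil {
        log.Fatal(err)
    }
    defer client.Close()

    // Illustrative limits (estimated DA bytes): ~120 KB per tx, ~130 KB per block.
    maxTx := (*hexutil.Big)(big.NewInt(120_000))
    maxBlock := (*hexutil.Big)(big.NewInt(130_000))

    var ok bool
    if err := client.CallContext(context.Background(), &ok, "miner_setMaxDASize", maxTx, maxBlock); err != nil {
        log.Fatal(err)
    }

    // Passing zero for both arguments lifts the throttle again, per SetMaxDASize above.
    zero := (*hexutil.Big)(big.NewInt(0))
    if err := client.CallContext(context.Background(), &ok, "miner_setMaxDASize", zero, zero); err != nil {
        log.Fatal(err)
    }
}

From an attached console the same call is exposed as miner.setMaxDASize(txLimit, blockLimit) via the web3 extension registered in the next file, with decimal inputs converted by fromDecimal.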
6 changes: 6 additions & 0 deletions internal/web3ext/web3ext.go
@@ -671,6 +671,12 @@ web3._extend({
            params: 1,
            inputFormatter: [web3._extend.utils.fromDecimal]
        }),
        new web3._extend.Method({
            name: 'setMaxDASize',
            call: 'miner_setMaxDASize',
            params: 2,
            inputFormatter: [web3._extend.utils.fromDecimal, web3._extend.utils.fromDecimal]
        }),
    ],
    properties: []
});
20 changes: 19 additions & 1 deletion miner/miner.go
@@ -58,7 +58,9 @@ type Config struct {
    RollupComputePendingBlock bool // Compute the pending block from tx-pool, instead of copying the latest-block
    RollupTransactionConditionalRateLimit int // Total number of conditional cost units allowed in a second

    EffectiveGasCeil uint64   // if non-zero, a gas ceiling to apply independent of the header's gaslimit value
    MaxDATxSize      *big.Int // if non-nil, don't include any txs with data availability size larger than this in any built block
    MaxDABlockSize   *big.Int // if non-nil, then don't build a block requiring more than this amount of total data availability
}

// DefaultConfig contains default settings for miner.
@@ -152,6 +154,22 @@ func (miner *Miner) SetGasTip(tip *big.Int) error {
    return nil
}

// SetMaxDASize sets the maximum data availability size currently allowed for inclusion. 0 means no maximum.
func (miner *Miner) SetMaxDASize(maxTxSize, maxBlockSize *big.Int) {
    miner.confMu.Lock()
    if maxTxSize == nil || maxTxSize.BitLen() == 0 {
        miner.config.MaxDATxSize = nil
    } else {
        miner.config.MaxDATxSize = new(big.Int).Set(maxTxSize)
    }
    if maxBlockSize == nil || maxBlockSize.BitLen() == 0 {
        miner.config.MaxDABlockSize = nil
    } else {
        miner.config.MaxDABlockSize = new(big.Int).Set(maxBlockSize)
    }
    miner.confMu.Unlock()
}

// BuildPayload builds the payload according to the provided parameters.
func (miner *Miner) BuildPayload(args *BuildPayloadArgs, witness bool) (*Payload, error) {
    return miner.buildPayload(args, witness)
71 changes: 65 additions & 6 deletions miner/payload_building_test.go
@@ -18,6 +18,7 @@ package miner

import (
"bytes"
"crypto/rand"
"math/big"
"reflect"
"testing"
@@ -65,10 +66,14 @@ var (
    testConfig = Config{
        PendingFeeRecipient: testBankAddress,
        Recommit: time.Second,
        GasCeil: params.GenesisGasLimit,
        GasCeil: 50_000_000,
    }
)

const (
    numDAFilterTxs = 256
)

func init() {
    testTxPoolConfig = legacypool.DefaultConfig
    testTxPoolConfig.Journal = ""
@@ -150,7 +155,7 @@ func (b *testWorkerBackend) TxPool() *txpool.TxPool { return b.txPool }

func newTestWorker(t *testing.T, chainConfig *params.ChainConfig, engine consensus.Engine, db ethdb.Database, blocks int) (*Miner, *testWorkerBackend) {
    backend := newTestWorkerBackend(t, chainConfig, engine, db, blocks)
    backend.txPool.Add(pendingTxs, true, true)
    backend.txPool.Add(pendingTxs, false, true)
    w := New(backend, testConfig, engine)
    return w, backend
}
@@ -173,6 +178,25 @@ func TestBuildPayload(t *testing.T) {
t.Run("with-zero-params", func(t *testing.T) { testBuildPayload(t, true, false, zeroParams) })
}

func TestDAFilters(t *testing.T) {
    // Each test case inserts one pending small (DA cost 100) transaction followed by
    // numDAFilterTxs transactions that have random calldata (min DA size >> 100)
    totalTxs := numDAFilterTxs + 1

    // Very low max should filter all transactions.
    t.Run("with-tx-filter-max-filters-all", func(t *testing.T) { testDAFilters(t, big.NewInt(1), nil, 0) })
    t.Run("with-block-filter-max-filters-all", func(t *testing.T) { testDAFilters(t, nil, big.NewInt(1), 0) })
    // Very high max should filter nothing.
    t.Run("with-tx-filter-max-too-high", func(t *testing.T) { testDAFilters(t, big.NewInt(1000000), nil, totalTxs) })
    t.Run("with-block-filter-max-too-high", func(t *testing.T) { testDAFilters(t, nil, big.NewInt(1000000), totalTxs) })
    // The first transaction has size 100, all other DA test txs are bigger due to random Data, so should get filtered.
    t.Run("with-tx-filter-all-but-first", func(t *testing.T) { testDAFilters(t, big.NewInt(100), nil, 1) })
    t.Run("with-block-filter-all-but-first", func(t *testing.T) { testDAFilters(t, nil, big.NewInt(100), 1) })
    // Zero/nil values for these parameters mean we should never filter.
t.Run("with-zero-tx-filters", func(t *testing.T) { testDAFilters(t, big.NewInt(0), big.NewInt(0), totalTxs) })
t.Run("with-zero-tx-filters", func(t *testing.T) { testDAFilters(t, nil, nil, totalTxs) })
}

func holoceneConfig() *params.ChainConfig {
    config := *params.TestChainConfig
    config.LondonBlock = big.NewInt(0)
@@ -204,18 +228,20 @@ func newPayloadArgs(parentHash common.Hash, params1559 []byte) *BuildPayloadArgs
func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte) {
    t.Parallel()
    db := rawdb.NewMemoryDatabase()

    config := params.TestChainConfig
    if len(params1559) != 0 {
        config = holoceneConfig()
    }
    w, b := newTestWorker(t, config, ethash.NewFaker(), db, 0)

    const numInterruptTxs = 256

    if interrupt {
        // when doing interrupt testing, create a large pool so interruption will
        // definitely be visible.
        txs := genTxs(1, numInterruptTxs)
        b.txPool.Add(txs, true, false)
        b.txPool.Add(txs, false, false)
    }

    args := newPayloadArgs(b.chain.CurrentBlock().Hash(), params1559)
@@ -294,6 +320,30 @@ func testBuildPayload(t *testing.T, noTxPool, interrupt bool, params1559 []byte)
    }
}

func testDAFilters(t *testing.T, maxDATxSize, maxDABlockSize *big.Int, expectedTxCount int) {
    t.Parallel()
    db := rawdb.NewMemoryDatabase()
    config := holoceneConfig()
    w, b := newTestWorker(t, config, ethash.NewFaker(), db, 0)
    w.SetMaxDASize(maxDATxSize, maxDABlockSize)
    txs := genTxs(1, numDAFilterTxs)
    b.txPool.Add(txs, false, false)

    params1559 := []byte{0, 1, 2, 3, 4, 5, 6, 7}
    args := newPayloadArgs(b.chain.CurrentBlock().Hash(), params1559)
    args.NoTxPool = false

    payload, err := w.buildPayload(args, false)
    if err != nil {
        t.Fatalf("Failed to build payload %v", err)
    }
    payload.WaitFull()
    result := payload.ResolveFull().ExecutionPayload
    if len(result.Transactions) != expectedTxCount {
        t.Fatalf("Unexpected transaction set: got %d, expected %d", len(result.Transactions), expectedTxCount)
    }
}

func testBuildPayloadWrongConfig(t *testing.T, params1559 []byte) {
    t.Parallel()
    db := rawdb.NewMemoryDatabase()
@@ -331,14 +381,23 @@ func genTxs(startNonce, count uint64) types.Transactions {
    txs := make(types.Transactions, 0, count)
    signer := types.LatestSigner(params.TestChainConfig)
    for nonce := startNonce; nonce < startNonce+count; nonce++ {
        txs = append(txs, types.MustSignNewTx(testBankKey, signer, &types.AccessListTx{
        // generate incompressible data to put in the tx for DA filter testing. each of these
        // txs will be bigger than the 100 minimum.
        randomBytes := make([]byte, 100)
        _, err := rand.Read(randomBytes)
        if err != nil {
            panic(err)
        }
        tx := types.MustSignNewTx(testBankKey, signer, &types.AccessListTx{
            ChainID: params.TestChainConfig.ChainID,
            Nonce: nonce,
            To: &testUserAddress,
            Value: big.NewInt(1000),
            Gas: params.TxGas,
            Gas: params.TxGas + uint64(len(randomBytes))*16,
            GasPrice: big.NewInt(params.InitialBaseFee),
        }))
            Data: randomBytes,
        })
        txs = append(txs, tx)
    }
    return txs
}
15 changes: 14 additions & 1 deletion miner/worker.go
@@ -415,6 +415,7 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran
    if env.gasPool == nil {
        env.gasPool = new(core.GasPool).AddGas(gasLimit)
    }
    blockDABytes := new(big.Int)
    for {
        // Check interruption signal and abort building if it's fired.
        if interrupt != nil {
@@ -468,6 +469,16 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran
            txs.Pop()
            continue
        }
        daBytesAfter := new(big.Int)
        if ltx.DABytes != nil && miner.config.MaxDABlockSize != nil {
            daBytesAfter.Add(blockDABytes, ltx.DABytes)
            if daBytesAfter.Cmp(miner.config.MaxDABlockSize) > 0 {
                log.Debug("adding tx would exceed block DA size limit",
                    "hash", ltx.Hash, "txda", ltx.DABytes, "blockda", blockDABytes, "dalimit", miner.config.MaxDABlockSize)
                txs.Pop()
                continue
            }
        }
        // Transaction seems to fit, pull it up from the pool
        tx := ltx.Resolve()
        if tx == nil {
@@ -507,6 +518,7 @@ func (miner *Miner) commitTransactions(env *environment, plainTxs, blobTxs *tran

        case errors.Is(err, nil):
            // Everything ok, collect the logs and shift in the next transaction from the same account
            blockDABytes = daBytesAfter
            txs.Shift()

        default:
@@ -529,7 +541,8 @@ func (miner *Miner) fillTransactions(interrupt *atomic.Int32, env *environment)

    // Retrieve the pending transactions pre-filtered by the 1559/4844 dynamic fees
    filter := txpool.PendingFilter{
        MinTip:      uint256.MustFromBig(tip),
        MaxDATxSize: miner.config.MaxDATxSize,
    }
    if env.header.BaseFee != nil {
        filter.BaseFee = uint256.MustFromBig(env.header.BaseFee)
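
Taken together (an illustrative distillation, not code from this PR), the change introduces a two-level throttle: the per-transaction cap is enforced when the legacy pool hands out pending transactions, and the cumulative per-block cap while the worker commits them. The standalone sketch below models just that decision logic with plain big.Int values; the type name, field names, and byte figures are made up for illustration.

package main

import (
    "fmt"
    "math/big"
)

// daThrottle mirrors, in simplified form, the two limits added by this PR:
// a per-transaction cap (MaxDATxSize) and a cumulative per-block cap
// (MaxDABlockSize). A nil limit means "no limit".
type daThrottle struct {
    maxTxSize    *big.Int
    maxBlockSize *big.Int
    blockDABytes *big.Int // running total for the block under construction
}

// admit reports whether a tx with the given estimated DA size may be included,
// updating the running block total only when it is.
func (t *daThrottle) admit(txDABytes *big.Int) bool {
    if t.maxTxSize != nil && txDABytes.Cmp(t.maxTxSize) > 0 {
        return false // analogous to the filtering in the txpool's Pending()
    }
    if t.maxBlockSize != nil {
        after := new(big.Int).Add(t.blockDABytes, txDABytes)
        if after.Cmp(t.maxBlockSize) > 0 {
            return false // analogous to the skip in commitTransactions
        }
    }
    t.blockDABytes.Add(t.blockDABytes, txDABytes)
    return true
}

func main() {
    th := &daThrottle{
        maxTxSize:    big.NewInt(5_000),
        maxBlockSize: big.NewInt(8_000),
        blockDABytes: new(big.Int),
    }
    for _, size := range []int64{3_000, 6_000, 4_000, 2_000} {
        fmt.Println(size, "->", th.admit(big.NewInt(size)))
    }
    // Prints: true, false (over the per-tx cap), true, false (would exceed the block cap).
}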