diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index d6cbcffeb4..f66474d6d4 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -15,7 +15,7 @@ concurrency: cancel-in-progress: true jobs: - tests: + build: if: (github.event.action != 'closed' || github.event.pull_request.merged == true) strategy: matrix: @@ -35,10 +35,6 @@ jobs: if: runner.os == 'Linux' run: sudo apt update && sudo apt install build-essential - - name: Golang-ci install - if: runner.os == 'Linux' - run: make lintci-deps - - uses: actions/cache@v3 with: path: | @@ -51,9 +47,62 @@ jobs: - name: Build run: make all - # - name: Lint - # if: runner.os == 'Linux' - # run: make lint + lint: + if: (github.event.action != 'closed' || github.event.pull_request.merged == true) + strategy: + matrix: + os: [ ubuntu-20.04 ] # list of os: https://github.com/actions/virtual-environments + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - run: | + git submodule update --init --recursive --force + git fetch --no-tags --prune --depth=1 origin +refs/heads/master:refs/remotes/origin/master + + - uses: actions/setup-go@v3 + with: + go-version: 1.20.x + + - name: Install dependencies on Linux + if: runner.os == 'Linux' + run: sudo apt update && sudo apt install build-essential + + - name: Golang-ci install + if: runner.os == 'Linux' + run: make lintci-deps + + - name: Lint + if: runner.os == 'Linux' + run: make lint + + unit-tests: + if: (github.event.action != 'closed' || github.event.pull_request.merged == true) + strategy: + matrix: + os: [ ubuntu-20.04 ] # list of os: https://github.com/actions/virtual-environments + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - run: | + git submodule update --init --recursive --force + git fetch --no-tags --prune --depth=1 origin +refs/heads/master:refs/remotes/origin/master + + - uses: actions/setup-go@v3 + with: + go-version: 1.20.x + + - name: Install dependencies on Linux + if: runner.os == 'Linux' + run: sudo apt update && sudo apt install build-essential + + - uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/Library/Caches/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-go- - name: Test run: make test @@ -61,14 +110,6 @@ jobs: #- name: Data race tests # run: make test-race - - name: test-integration - run: make test-integration - - - name: Upload coverage to Codecov - uses: codecov/codecov-action@v1 - with: - file: ./cover.out - # # TODO: make it work # - name: Reproducible build test # run: | @@ -81,6 +122,50 @@ jobs: # fi integration-tests: + if: (github.event.action != 'closed' || github.event.pull_request.merged == true) + strategy: + matrix: + os: [ ubuntu-20.04 ] # list of os: https://github.com/actions/virtual-environments + runs-on: ${{ matrix.os }} + steps: + - uses: actions/checkout@v3 + - run: | + git submodule update --init --recursive --force + git fetch --no-tags --prune --depth=1 origin +refs/heads/master:refs/remotes/origin/master + + - uses: actions/setup-go@v3 + with: + go-version: 1.20.x + + - name: Install dependencies on Linux + if: runner.os == 'Linux' + run: sudo apt update && sudo apt install build-essential + + - uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/Library/Caches/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: ${{ runner.os }}-go- + + - name: test-integration + run: make test-integration + + codecov: + if: (github.event.action != 'closed' || 
github.event.pull_request.merged == true) + strategy: + matrix: + os: [ ubuntu-20.04 ] # list of os: https://github.com/actions/virtual-environments + runs-on: ${{ matrix.os }} + steps: + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + file: ./cover.out + + e2e-tests: if: (github.event.action != 'closed' || github.event.pull_request.merged == true) strategy: matrix: @@ -154,7 +239,7 @@ jobs: cd matic-cli/devnet/code/contracts npm run truffle exec scripts/deposit.js -- --network development $(jq -r .root.tokens.MaticToken contractAddresses.json) 100000000000000000000 cd - - timeout 20m bash bor/integration-tests/smoke_test.sh + timeout 60m bash bor/integration-tests/smoke_test.sh - name: Upload logs if: always() diff --git a/README.md b/README.md index 29e4837df1..a206a3e369 100644 --- a/README.md +++ b/README.md @@ -1,111 +1,71 @@ # Bor Overview -Bor is the Official Golang implementation of the Matic protocol. It is a fork of Go Ethereum - https://github.com/ethereum/go-ethereum and EVM compatible. +Bor is the Official Golang implementation of the Polygon PoS blockchain. It is a fork of [geth](https://github.com/ethereum/go-ethereum) and is EVM compatible (upto London fork). -![Forks](https://img.shields.io/github/forks/maticnetwork/bor?style=social) -![Stars](https://img.shields.io/github/stars/maticnetwork/bor?style=social) -![Languages](https://img.shields.io/github/languages/count/maticnetwork/bor) -![Issues](https://img.shields.io/github/issues/maticnetwork/bor) -![PRs](https://img.shields.io/github/issues-pr-raw/maticnetwork/bor) +[![API Reference]( +https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 +)](https://pkg.go.dev/github.com/maticnetwork/bor) +[![Go Report Card](https://goreportcard.com/badge/github.com/maticnetwork/bor)](https://goreportcard.com/report/github.com/maticnetwork/bor) ![MIT License](https://img.shields.io/github/license/maticnetwork/bor) -![contributors](https://img.shields.io/github/contributors-anon/maticnetwork/bor) -![size](https://img.shields.io/github/languages/code-size/maticnetwork/bor) -![lines](https://img.shields.io/tokei/lines/github/maticnetwork/bor) [![Discord](https://img.shields.io/discord/714888181740339261?color=1C1CE1&label=Polygon%20%7C%20Discord%20%F0%9F%91%8B%20&style=flat-square)](https://discord.gg/zdwkdvMNY2) [![Twitter Follow](https://img.shields.io/twitter/follow/0xPolygon.svg?style=social)](https://twitter.com/0xPolygon) -## How to contribute +### Installing bor using packaging -### Contribution Guidelines -We believe one of the things that makes Polygon special is its coherent design and we seek to retain this defining characteristic. From the outset we defined some guidelines to ensure new contributions only ever enhance the project: +The easiest way to get started with bor is to install the packages using the command below. Refer to the [releases](https://github.com/maticnetwork/bor/releases) section to find the latest stable version of bor. 
+ + curl -L https://raw.githubusercontent.com/maticnetwork/install/main/bor.sh | bash -s -- v0.4.0 -* Quality: Code in the Polygon project should meet the style guidelines, with sufficient test-cases, descriptive commit messages, evidence that the contribution does not break any compatibility commitments or cause adverse feature interactions, and evidence of high-quality peer-review -* Size: The Polygon project’s culture is one of small pull-requests, regularly submitted. The larger a pull-request, the more likely it is that you will be asked to resubmit as a series of self-contained and individually reviewable smaller PRs -* Maintainability: If the feature will require ongoing maintenance (eg support for a particular brand of database), we may ask you to accept responsibility for maintaining this feature -### Submit an issue +The network accepts `mainnet` or `mumbai` and the node type accepts `validator` or `sentry` or `archive`. The installation script does the following things: +- Create a new user named `bor`. +- Install the bor binary at `/usr/bin/bor`. +- Dump the suitable config file (based on the network and node type provided) at `/var/lib/bor` and uses it as the home dir. +- Create a systemd service named `bor` at `/lib/systemd/system/bor.service` which starts bor using the config file as `bor` user. -- Create a [new issue](https://github.com/maticnetwork/bor/issues/new/choose) -- Comment on the issue (if you'd like to be assigned to it) - that way [our team can assign the issue to you](https://github.blog/2019-06-25-assign-issues-to-issue-commenters/). -- If you do not have a specific contribution in mind, you can also browse the issues labelled as `help wanted` -- Issues that additionally have the `good first issue` label are considered ideal for first-timers +The releases supports both the networks i.e. Polygon Mainnet and Mumbai (Testnet) unless explicitly specified. Before the stable release for mainnet, pre-releases will be available marked with `beta` tag for deploying on Mumbai (testnet). On sufficient testing, stable release for mainnet will be announced with a forum post. -### Fork the repository (repo) - -- If you're not sure, here's how to [fork the repo](https://help.github.com/en/articles/fork-a-repo) - -- If this is your first time forking our repo, this is all you need to do for this step: +### Building from source +- Install Go (version 1.19 or later) and a C compiler. +- Clone the repository and build the binary using the following commands: + ```shell + make bor ``` - $ git clone git@github.com:[your_github_handle]/bor +- Start bor using the ideal config files for validator and sentry provided in the `packaging` folder. + ```shell + ./build/bin/bor server --config ./packaging/templates/mainnet-v1/sentry/sentry/bor/config.toml ``` - -- If you've already forked the repo, you'll want to ensure your fork is configured and that it's up to date. This will save you the headache of potential merge conflicts. 
- -- To [configure your fork](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/configuring-a-remote-for-a-fork): - +- To build full set of utilities, run: + ```shell + make all ``` - $ git remote add upstream https://github.com/maticnetwork/bor +- Run unit and integration tests + ```shell + make test && make test-integration ``` -- To [sync your fork with the latest changes](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/syncing-a-fork): +#### Using the new cli - ``` - $ git checkout master - $ git fetch upstream - $ git merge upstream/master - ``` +Post `v0.3.0` release, bor uses a new command line interface (cli). The new-cli (located at `internal/cli`) has been built with keeping the flag usage similar to old-cli (located at `cmd/geth`) with a few notable changes. Please refer to [docs](./docs) section for flag usage guide and example. -### Building the source +### Documentation -- Building `bor` requires both a Go (version 1.19 or later) and a C compiler. You can install -them using your favourite package manager. Once the dependencies are installed, run +- The official documentation for the Polygon PoS chain can be found [here](https://wiki.polygon.technology/docs/pos/getting-started/). It contains all the conceptual and architectural details of the chain along with operational guide for users running the nodes. +- New release announcements and discussions can be found on our [forum page](https://forum.polygon.technology/). +- Polygon improvement proposals can be found [here](https://github.com/maticnetwork/Polygon-Improvement-Proposals/) - ```shell - $ make bor - ``` +### Contribution guidelines -### Make awesome changes! +Thank you for considering helping out with the source code! We welcome contributions from anyone on the internet, and are grateful for even the smallest of fixes! If you'd like to contribute to bor, please fork, fix, commit and send a pull request for the maintainers to review and merge into the main code base. -1. Create new branch for your changes - - ``` - $ git checkout -b new_branch_name - ``` - -2. Commit and prepare for pull request (PR). In your PR commit message, reference the issue it resolves (see [how to link a commit message to an issue using a keyword](https://docs.github.com/en/free-pro-team@latest/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword). - - - Checkout our [Git-Rules](https://wiki.polygon.technology/docs/contribute/orientation/#git-rules) - - ``` - $ git commit -m "brief description of changes [Fixes #1234]" - ``` - -3. Push to your GitHub account - - ``` - $ git push - ``` - -### Submit your PR - -- After your changes are committed to your GitHub fork, submit a pull request (PR) to the `master` branch of the `maticnetwork/bor` repo -- In your PR description, reference the issue it resolves (see [linking a pull request to an issue using a keyword](https://docs.github.com/en/free-pro-team@latest/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword)) - - ex. `Updates out of date content [Fixes #1234]` -- Why not say hi and draw attention to your PR in [our discord server](https://discord.gg/0xpolygon)? - -### Wait for review - -- The team reviews every PR -- Acceptable PRs will be approved & merged into the `master` branch - -
- -## Release - -- You can [view the history of releases](https://github.com/maticnetwork/bor/releases), which include PR highlights - -
+From the outset we defined some guidelines to ensure new contributions only ever enhance the project: +* Quality: Code in the Polygon project should meet the style guidelines, with sufficient test-cases, descriptive commit messages, evidence that the contribution does not break any compatibility commitments or cause adverse feature interactions, and evidence of high-quality peer-review. Code must adhere to the official Go [formatting](https://golang.org/doc/effective_go.html#formatting) guidelines (i.e. uses [gofmt](https://golang.org/cmd/gofmt/)). +* Testing: Please ensure that the updated code passes all the tests locally before submitting a pull request. In order to run unit tests, run `make test` and to run integration tests, run `make test-integration`. +* Size: The Polygon project’s culture is one of small pull-requests, regularly submitted. The larger a pull-request, the more likely it is that you will be asked to resubmit as a series of self-contained and individually reviewable smaller PRs. +* Maintainability: If the feature will require ongoing maintenance (e.g. support for a particular brand of database), we may ask you to accept responsibility for maintaining this feature +* Pull requests need to be based on and opened against the `develop` branch. +* PR title should be prefixed with package(s) they modify. + * E.g. "eth, rpc: make trace configs optional" ## License @@ -117,8 +77,6 @@ The go-ethereum binaries (i.e. all code inside of the `cmd` directory) are licen [GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html), also included in our repository in the `COPYING` file. -
- ## Join our Discord server Join Polygon community – share your ideas or just say hi over [on Discord](https://discord.gg/zdwkdvMNY2). diff --git a/builder/files/config.toml b/builder/files/config.toml index af06faf7c2..61b3796984 100644 --- a/builder/files/config.toml +++ b/builder/files/config.toml @@ -38,9 +38,9 @@ syncmode = "full" # nodekeyhex = "" [p2p.discovery] # v5disc = false - bootnodes = ["enode://0cb82b395094ee4a2915e9714894627de9ed8498fb881cec6db7c65e8b9a5bd7f2f25cc84e71e89d0947e51c76e85d0847de848c7782b13c0255247a6758178c@44.232.55.71:30303", "enode://88116f4295f5a31538ae409e4d44ad40d22e44ee9342869e7d68bdec55b0f83c1530355ce8b41fbec0928a7d75a5745d528450d30aec92066ab6ba1ee351d710@159.203.9.164:30303"] + bootnodes = ["enode://b8f1cc9c5d4403703fbf377116469667d2b1823c0daf16b7250aa576bacf399e42c3930ccfcb02c5df6879565a2b8931335565f0e8d3f8e72385ecf4a4bf160a@3.36.224.80:30303", "enode://8729e0c825f3d9cad382555f3e46dcff21af323e89025a0e6312df541f4a9e73abfa562d64906f5e59c51fe6f0501b3e61b07979606c56329c020ed739910759@54.194.245.5:30303"] # Uncomment below `bootnodes` field for Mumbai bootnode - # bootnodes = ["enode://095c4465fe509bd7107bbf421aea0d3ad4d4bfc3ff8f9fdc86f4f950892ae3bbc3e5c715343c4cf60c1c06e088e621d6f1b43ab9130ae56c2cacfd356a284ee4@18.213.200.99:30303"] + # bootnodes = ["enode://bdcd4786a616a853b8a041f53496d853c68d99d54ff305615cd91c03cd56895e0a7f6e9f35dbf89131044e2114a9a782b792b5661e3aff07faf125a98606a071@43.200.206.40:30303", "enode://209aaf7ed549cf4a5700fd833da25413f80a1248bd3aa7fe2a87203e3f7b236dd729579e5c8df61c97bf508281bae4969d6de76a7393bcbd04a0af70270333b3@54.216.248.9:30303"] # bootnodesv4 = [] # bootnodesv5 = [] # static-nodes = [] diff --git a/common/types.go b/common/types.go index da0b953611..e51b4f9b67 100644 --- a/common/types.go +++ b/common/types.go @@ -29,8 +29,9 @@ import ( "strconv" "strings" - "github.com/ethereum/go-ethereum/common/hexutil" "golang.org/x/crypto/sha3" + + "github.com/ethereum/go-ethereum/common/hexutil" ) // Lengths of hashes and addresses in bytes. @@ -67,6 +68,12 @@ func BigToHash(b *big.Int) Hash { return BytesToHash(b.Bytes()) } // If b is larger than len(h), b will be cropped from the left. func HexToHash(s string) Hash { return BytesToHash(FromHex(s)) } +func HexToRefHash(s string) *Hash { + v := BytesToHash(FromHex(s)) + + return &v +} + // Cmp compares two hashes. 
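+// It follows bytes.Compare semantics, returning -1, 0 or +1.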
func (h Hash) Cmp(other Hash) int { return bytes.Compare(h[:], other[:]) diff --git a/consensus/bor/api.go b/consensus/bor/api.go index 7dd1e8b071..6d72e309e3 100644 --- a/consensus/bor/api.go +++ b/consensus/bor/api.go @@ -341,10 +341,6 @@ func (api *API) GetRootHash(start uint64, end uint64) (string, error) { return root, nil } -func (api *API) GetVoteOnHash(starBlockNr uint64, endBlockNr uint64, hash string, milestoneId string) (bool, error) { - return false, nil -} - func (api *API) initializeRootHashCache() error { var err error if api.rootHashCache == nil { diff --git a/core/bench_test.go b/core/bench_test.go index 3077e4694b..528f28a3ee 100644 --- a/core/bench_test.go +++ b/core/bench_test.go @@ -190,7 +190,7 @@ func benchInsertChain(b *testing.B, disk bool, gen func(int, *BlockGen)) { } else { dir := b.TempDir() - db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "", false) + db, err = rawdb.NewLevelDBDatabase(dir, 128, 128, "", false, rawdb.ExtraDBConfig{}) if err != nil { b.Fatalf("cannot create temporary database: %v", err) } @@ -293,7 +293,7 @@ func makeChainForBench(db ethdb.Database, full bool, count uint64) { func benchWriteChain(b *testing.B, full bool, count uint64) { for i := 0; i < b.N; i++ { dir := b.TempDir() - db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false) + db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false, rawdb.ExtraDBConfig{}) if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) @@ -307,7 +307,7 @@ func benchWriteChain(b *testing.B, full bool, count uint64) { func benchReadChain(b *testing.B, full bool, count uint64) { dir := b.TempDir() - db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false) + db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false, rawdb.ExtraDBConfig{}) if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } @@ -322,7 +322,7 @@ func benchReadChain(b *testing.B, full bool, count uint64) { b.ResetTimer() for i := 0; i < b.N; i++ { - db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false) + db, err := rawdb.NewLevelDBDatabase(dir, 128, 1024, "", false, rawdb.ExtraDBConfig{}) if err != nil { b.Fatalf("error opening database at %v: %v", dir, err) } diff --git a/core/rawdb/database.go b/core/rawdb/database.go index 0801916db3..d5c4e0f395 100644 --- a/core/rawdb/database.go +++ b/core/rawdb/database.go @@ -321,8 +321,8 @@ func NewMemoryDatabaseWithCap(size int) ethdb.Database { // NewLevelDBDatabase creates a persistent key-value database without a freezer // moving immutable chain segments into cold storage. 
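+// The extraDBConfig parameter carries the optional LevelDB compaction settings; any
+// zero-valued field leaves goleveldb's built-in default for that option untouched.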
-func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) { - db, err := leveldb.New(file, cache, handles, namespace, readonly) +func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool, extraDBConfig ExtraDBConfig) (ethdb.Database, error) { + db, err := leveldb.New(file, cache, handles, namespace, readonly, resolveLevelDBConfig(extraDBConfig)) if err != nil { return nil, err } @@ -332,6 +332,15 @@ func NewLevelDBDatabase(file string, cache int, handles int, namespace string, r return NewDatabase(db), nil } +func resolveLevelDBConfig(config ExtraDBConfig) leveldb.LevelDBConfig { + return leveldb.LevelDBConfig{ + CompactionTableSize: config.LevelDBCompactionTableSize, + CompactionTableSizeMultiplier: config.LevelDBCompactionTableSizeMultiplier, + CompactionTotalSize: config.LevelDBCompactionTotalSize, + CompactionTotalSizeMultiplier: config.LevelDBCompactionTotalSizeMultiplier, + } +} + const ( dbPebble = "pebble" dbLeveldb = "leveldb" @@ -404,7 +413,7 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) { } if o.Type == dbLeveldb || existingDb == dbLeveldb { log.Info("Using leveldb as the backing database") - return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) + return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.ExtraDBConfig) } // No pre-existing database, no user-requested one either. Default to Pebble // on supported platforms and LevelDB on anything else. @@ -413,8 +422,13 @@ func openKeyValueDatabase(o OpenOptions) (ethdb.Database, error) { return NewPebbleDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) } else { log.Info("Defaulting to leveldb as the backing database") - return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly) + return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.ExtraDBConfig) } + + // Use leveldb, either as default (no explicit choice), or pre-existing, or chosen explicitly + log.Info("Using leveldb as the backing database") + + return NewLevelDBDatabase(o.Directory, o.Cache, o.Handles, o.Namespace, o.ReadOnly, o.ExtraDBConfig) } // Open opens both a disk-based key-value database such as leveldb or pebble, but also diff --git a/core/state/state_test.go b/core/state/state_test.go index bcab21f3a2..eedc846f5b 100644 --- a/core/state/state_test.go +++ b/core/state/state_test.go @@ -22,6 +22,8 @@ import ( "math/big" "testing" + "github.com/stretchr/testify/require" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/rawdb" "github.com/ethereum/go-ethereum/core/types" @@ -313,3 +315,66 @@ func compareStateObjects(so0, so1 *stateObject, t *testing.T) { } } } + +func TestValidateKnownAccounts(t *testing.T) { + t.Parallel() + + knownAccounts := make(types.KnownAccounts) + + types.InsertKnownAccounts(knownAccounts, common.HexToAddress("0xadd1add1add1add1add1add1add1add1add1add1"), common.HexToHash("0x2d6f8a898e7dec0bb7a50e8c142be32d7c98c096ff68ed57b9b08280d9aca1ce")) + types.InsertKnownAccounts(knownAccounts, common.HexToAddress("0xadd2add2add2add2add2add2add2add2add2add2"), map[common.Hash]common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000aaa"): common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000bbb"), + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000ccc"): 
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000ddd"), + }) + + stateobjaddr1 := common.HexToAddress("0xadd1add1add1add1add1add1add1add1add1add1") + stateobjaddr2 := common.HexToAddress("0xadd2add2add2add2add2add2add2add2add2add2") + + storageaddr1 := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000zzz") + storageaddr21 := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000aaa") + storageaddr22 := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000ccc") + + data1 := common.BytesToHash([]byte{24}) + data21 := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000bbb") + data22 := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000ddd") + + s := newStateTest() + + // set initial state object value + s.state.SetState(stateobjaddr1, storageaddr1, data1) + s.state.SetState(stateobjaddr2, storageaddr21, data21) + s.state.SetState(stateobjaddr2, storageaddr22, data22) + + require.NoError(t, s.state.ValidateKnownAccounts(knownAccounts)) + + types.InsertKnownAccounts(knownAccounts, common.HexToAddress("0xadd1add1add1add1add1add1add1add1add1add2"), common.HexToHash("0x2d6f8a898e7dec0bb7a50e8c142be32d7c98c096ff68ed57b9b08280d9aca1cf")) + + stateobjaddr3 := common.HexToAddress("0xadd1add1add1add1add1add1add1add1add1add2") + storageaddr3 := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000yyy") + data3 := common.BytesToHash([]byte{24}) + + s.state.SetState(stateobjaddr3, storageaddr3, data3) + + // expected error + err := s.state.ValidateKnownAccounts(knownAccounts) + require.Error(t, err, "should have been an error") + + // correct the previous mistake "0x2d6f8a898e7dec0bb7a50e8c142be32d7c98c096ff68ed57b9b08280d9aca1cf" -> "0x2d6f8a898e7dec0bb7a50e8c142be32d7c98c096ff68ed57b9b08280d9aca1ce" + types.InsertKnownAccounts(knownAccounts, common.HexToAddress("0xadd1add1add1add1add1add1add1add1add1add2"), common.HexToHash("0x2d6f8a898e7dec0bb7a50e8c142be32d7c98c096ff68ed57b9b08280d9aca1ce")) + types.InsertKnownAccounts(knownAccounts, common.HexToAddress("0xadd2add2add2add2add2add2add2add2add2add3"), map[common.Hash]common.Hash{ + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000aaa"): common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000bbb"), + common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000ccc"): common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000ddd"), + }) + + stateobjaddr4 := common.HexToAddress("0xadd2add2add2add2add2add2add2add2add2add3") + storageaddr41 := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000aaa") + storageaddr42 := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000ccc") + data4 := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000bbb") + + s.state.SetState(stateobjaddr4, storageaddr41, data4) + s.state.SetState(stateobjaddr4, storageaddr42, data4) + + // expected error + err = s.state.ValidateKnownAccounts(knownAccounts) + require.Error(t, err, "should have been an error") +} diff --git a/core/state/statedb.go b/core/state/statedb.go index dccb185734..b20b966170 100644 --- a/core/state/statedb.go +++ b/core/state/statedb.go @@ -1843,6 +1843,39 @@ func (s *StateDB) SlotInAccessList(addr common.Address, slot common.Hash) (addre return s.accessList.Contains(addr, 
slot) } +func (s *StateDB) ValidateKnownAccounts(knownAccounts types.KnownAccounts) error { + if knownAccounts == nil { + return nil + } + + for k, v := range knownAccounts { + // check if the value is hex string or an object + switch { + case v.IsSingle(): + trie, _ := s.StorageTrie(k) + if trie != nil { + actualRootHash := trie.Hash() + if *v.Single != actualRootHash { + return fmt.Errorf("invalid root hash for: %v root hash: %v actual root hash: %v", k, v.Single, actualRootHash) + } + } else { + return fmt.Errorf("Storage Trie is nil for: %v", k) + } + case v.IsStorage(): + for slot, value := range v.Storage { + actualValue := s.GetState(k, slot) + if value != actualValue { + return fmt.Errorf("invalid slot value at address: %v slot: %v value: %v actual value: %v", k, slot, value, actualValue) + } + } + default: + return fmt.Errorf("impossible to validate known accounts: %v", k) + } + } + + return nil +} + // convertAccountSet converts a provided account set from address keyed to hash keyed. func (s *StateDB) convertAccountSet(set map[common.Address]*types.StateAccount) map[common.Hash]struct{} { ret := make(map[common.Hash]struct{}, len(set)) diff --git a/core/txpool/legacypool/legacypool_test.go b/core/txpool/legacypool/legacypool_test.go index e9128f5215..870631902e 100644 --- a/core/txpool/legacypool/legacypool_test.go +++ b/core/txpool/legacypool/legacypool_test.go @@ -1981,6 +1981,7 @@ func TestUnderpricing(t *testing.T) { keys[i], _ = crypto.GenerateKey() testAddBalance(pool, crypto.PubkeyToAddress(keys[i].PublicKey), big.NewInt(1000000)) } + // Generate and queue a batch of transactions, both pending and queued txs := types.Transactions{} @@ -2011,6 +2012,7 @@ func TestUnderpricing(t *testing.T) { if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + // Ensure that adding an underpriced transaction on block limit fails if err := pool.addRemoteSync(pricedTransaction(0, 100000, big.NewInt(1), keys[1])); !errors.Is(err, txpool.ErrUnderpriced) { t.Fatalf("adding underpriced pending transaction error mismatch: have %v, want %v", err, txpool.ErrUnderpriced) @@ -2050,6 +2052,7 @@ func TestUnderpricing(t *testing.T) { if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + // Ensure that adding local transactions can push out even higher priced ones ltx = pricedTransaction(1, 100000, big.NewInt(0), keys[2]) if err := pool.addLocal(ltx); err != nil { @@ -2250,6 +2253,7 @@ func TestUnderpricingDynamicFee(t *testing.T) { if err := validatePoolInternals(pool); err != nil { t.Fatalf("pool internal state corrupted: %v", err) } + // Ensure that adding local transactions can push out even higher priced ones ltx = dynamicFeeTx(1, 100000, big.NewInt(0), big.NewInt(0), keys[2]) if err := pool.addLocal(ltx); err != nil { diff --git a/core/txpool/legacypool/list.go b/core/txpool/legacypool/list.go index 420b1f6de6..75b5c19aa8 100644 --- a/core/txpool/legacypool/list.go +++ b/core/txpool/legacypool/list.go @@ -29,6 +29,7 @@ import ( "github.com/ethereum/go-ethereum/common" cmath "github.com/ethereum/go-ethereum/common/math" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" ) @@ -559,6 +560,32 @@ func (l *list) Filter(costLimit *uint256.Int, gasLimit uint64) (types.Transactio return removed, invalids } +// FilterTxConditional returns the conditional transactions with invalid KnownAccounts +// TODO - We will also have to 
check block range and time stamp range! +func (l *list) FilterTxConditional(state *state.StateDB) types.Transactions { + removed := l.txs.filter(func(tx *types.Transaction) bool { + if options := tx.GetOptions(); options != nil { + err := state.ValidateKnownAccounts(options.KnownAccounts) + if err != nil { + log.Error("Error while Filtering Tx Conditional", "err", err) + return true + } + + return false + } + + return false + }) + + if len(removed) == 0 { + return nil + } + + l.txs.reheap(true) + + return removed +} + // Cap places a hard limit on the number of items, returning all transactions // exceeding that limit. func (l *list) Cap(threshold int) types.Transactions { diff --git a/core/txpool/legacypool/list_test.go b/core/txpool/legacypool/list_test.go index 1c820b0927..6dcb0eb599 100644 --- a/core/txpool/legacypool/list_test.go +++ b/core/txpool/legacypool/list_test.go @@ -22,7 +22,11 @@ import ( "testing" "github.com/holiman/uint256" + "github.com/stretchr/testify/require" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/rawdb" + "github.com/ethereum/go-ethereum/core/state" "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/crypto" ) @@ -78,3 +82,63 @@ func BenchmarkListAdd(b *testing.B) { } } } + +func TestFilterTxConditional(t *testing.T) { + t.Parallel() + + // Create an in memory state db to test against. + memDb := rawdb.NewMemoryDatabase() + db := state.NewDatabase(memDb) + state, _ := state.New(common.Hash{}, db, nil) + + // Create a private key to sign transactions. + key, _ := crypto.GenerateKey() + + // Create a list. + list := newList(true) + + // Create a transaction with no defined tx options + // and add to the list. + tx := transaction(0, 1000, key) + list.Add(tx, DefaultConfig.PriceBump) + + // There should be no drops at this point. + // No state has been modified. + drops := list.FilterTxConditional(state) + + count := len(drops) + require.Equal(t, 0, count, "got %d filtered by TxOptions when there should not be any", count) + + // Create another transaction with a known account storage root tx option + // and add to the list. + tx2 := transaction(1, 1000, key) + + var options types.OptionsAA4337 + + options.KnownAccounts = types.KnownAccounts{ + common.Address{19: 1}: &types.Value{ + Single: common.HexToRefHash("0xe734938daf39aae1fa4ee64dc3155d7c049f28b57a8ada8ad9e86832e0253bef"), + }, + } + + state.SetState(common.Address{19: 1}, common.Hash{}, common.Hash{30: 1}) + tx2.PutOptions(&options) + list.Add(tx2, DefaultConfig.PriceBump) + + // There should still be no drops as no state has been modified. 
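+	// (the slot written above still yields the exact storage root pinned in tx2's
+	// known-accounts option, so the conditional check passes)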
+ drops = list.FilterTxConditional(state) + + count = len(drops) + require.Equal(t, 0, count, "got %d filtered by TxOptions when there should not be any", count) + + // Set state that conflicts with tx2's policy + state.SetState(common.Address{19: 1}, common.Hash{}, common.Hash{31: 1}) + + // tx2 should be the single transaction filtered out + drops = list.FilterTxConditional(state) + + count = len(drops) + require.Equal(t, 1, count, "got %d filtered by TxOptions when there should be a single one", count) + + require.Equal(t, tx2, drops[0], "Got %x, expected %x", drops[0].Hash(), tx2.Hash()) +} diff --git a/core/types/block.go b/core/types/block.go index b3a55e04db..b3b8eea59d 100644 --- a/core/types/block.go +++ b/core/types/block.go @@ -186,6 +186,44 @@ func (h *Header) EmptyReceipts() bool { return h.ReceiptHash == EmptyReceiptsHash } +// ValidateBlockNumberOptions4337 validates the block range passed as in the options parameter in the conditional transaction (EIP-4337) +func (h *Header) ValidateBlockNumberOptions4337(minBlockNumber *big.Int, maxBlockNumber *big.Int) error { + currentBlockNumber := h.Number + + if minBlockNumber != nil { + if currentBlockNumber.Cmp(minBlockNumber) == -1 { + return fmt.Errorf("current block number %v is less than minimum block number: %v", currentBlockNumber, minBlockNumber) + } + } + + if maxBlockNumber != nil { + if currentBlockNumber.Cmp(maxBlockNumber) == 1 { + return fmt.Errorf("current block number %v is greater than maximum block number: %v", currentBlockNumber, maxBlockNumber) + } + } + + return nil +} + +// ValidateBlockNumberOptions4337 validates the timestamp range passed as in the options parameter in the conditional transaction (EIP-4337) +func (h *Header) ValidateTimestampOptions4337(minTimestamp *uint64, maxTimestamp *uint64) error { + currentBlockTime := h.Time + + if minTimestamp != nil { + if currentBlockTime < *minTimestamp { + return fmt.Errorf("current block time %v is less than minimum timestamp: %v", currentBlockTime, minTimestamp) + } + } + + if maxTimestamp != nil { + if currentBlockTime > *maxTimestamp { + return fmt.Errorf("current block time %v is greater than maximum timestamp: %v", currentBlockTime, maxTimestamp) + } + } + + return nil +} + // Body is a simple (mutable, non-safe) data container for storing and moving // a block's data contents (transactions and uncles) together. 
type Body struct { diff --git a/core/types/block_test.go b/core/types/block_test.go index 5d2a9ce7d1..218205e564 100644 --- a/core/types/block_test.go +++ b/core/types/block_test.go @@ -435,3 +435,167 @@ func TestRlpDecodeParentHash(t *testing.T) { } } } + +func TestValidateBlockNumberOptions4337(t *testing.T) { + t.Parallel() + + testsPass := []struct { + number string + header Header + minBlockNumber *big.Int + maxBlockNumber *big.Int + }{ + { + "1", + Header{Number: big.NewInt(10)}, + big.NewInt(0), + big.NewInt(20), + }, + { + "2", + Header{Number: big.NewInt(10)}, + big.NewInt(10), + big.NewInt(10), + }, + { + "3", + Header{Number: big.NewInt(10)}, + big.NewInt(10), + big.NewInt(11), + }, + { + "4", + Header{Number: big.NewInt(10)}, + big.NewInt(0), + big.NewInt(10), + }, + } + + testsFail := []struct { + number string + header Header + minBlockNumber *big.Int + maxBlockNumber *big.Int + }{ + { + "5", + Header{Number: big.NewInt(10)}, + big.NewInt(0), + big.NewInt(0), + }, + { + "6", + Header{Number: big.NewInt(10)}, + big.NewInt(0), + big.NewInt(9), + }, + { + "7", + Header{Number: big.NewInt(10)}, + big.NewInt(11), + big.NewInt(9), + }, + { + "8", + Header{Number: big.NewInt(10)}, + big.NewInt(11), + big.NewInt(20), + }, + } + + for _, test := range testsPass { + if err := test.header.ValidateBlockNumberOptions4337(test.minBlockNumber, test.maxBlockNumber); err != nil { + t.Fatalf("test number %v should not have failed. err: %v", test.number, err) + } + } + + for _, test := range testsFail { + if err := test.header.ValidateBlockNumberOptions4337(test.minBlockNumber, test.maxBlockNumber); err == nil { + t.Fatalf("test number %v should have failed. err is nil", test.number) + } + } +} + +func TestValidateTimestampOptions4337(t *testing.T) { + t.Parallel() + + u64Ptr := func(n uint64) *uint64 { + return &n + } + + testsPass := []struct { + number string + header Header + minTimestamp *uint64 + maxTimestamp *uint64 + }{ + { + "1", + Header{Time: 1600000000}, + u64Ptr(1500000000), + u64Ptr(1700000000), + }, + { + "2", + Header{Time: 1600000000}, + u64Ptr(1600000000), + u64Ptr(1600000000), + }, + { + "3", + Header{Time: 1600000000}, + u64Ptr(1600000000), + u64Ptr(1700000000), + }, + { + "4", + Header{Time: 1600000000}, + u64Ptr(1500000000), + u64Ptr(1600000000), + }, + } + + testsFail := []struct { + number string + header Header + minTimestamp *uint64 + maxTimestamp *uint64 + }{ + { + "5", + Header{Time: 1600000000}, + u64Ptr(1500000000), + u64Ptr(1500000000), + }, + { + "6", + Header{Time: 1600000000}, + u64Ptr(1400000000), + u64Ptr(1500000000), + }, + { + "7", + Header{Time: 1600000000}, + u64Ptr(1700000000), + u64Ptr(1500000000), + }, + { + "8", + Header{Time: 1600000000}, + u64Ptr(1700000000), + u64Ptr(1800000000), + }, + } + + for _, test := range testsPass { + if err := test.header.ValidateTimestampOptions4337(test.minTimestamp, test.maxTimestamp); err != nil { + t.Fatalf("test number %v should not have failed. err: %v", test.number, err) + } + } + + for _, test := range testsFail { + if err := test.header.ValidateTimestampOptions4337(test.minTimestamp, test.maxTimestamp); err == nil { + t.Fatalf("test number %v should have failed. 
err is nil", test.number) + } + } +} diff --git a/core/types/transaction.go b/core/types/transaction.go index 93244c625f..7af38f7b19 100644 --- a/core/types/transaction.go +++ b/core/types/transaction.go @@ -53,6 +53,9 @@ type Transaction struct { inner TxData // Consensus contents of a transaction time time.Time // Time first seen locally (spam avoidance) + // knownAccounts (EIP-4337) + optionsAA4337 *OptionsAA4337 + // caches hash atomic.Value size atomic.Value @@ -99,6 +102,16 @@ type TxData interface { effectiveGasPrice(dst *big.Int, baseFee *big.Int) *big.Int } +// PutOptions stores the optionsAA4337 field of the conditional transaction (EIP-4337) +func (tx *Transaction) PutOptions(options *OptionsAA4337) { + tx.optionsAA4337 = options +} + +// GetOptions returns the optionsAA4337 field of the conditional transaction (EIP-4337) +func (tx *Transaction) GetOptions() *OptionsAA4337 { + return tx.optionsAA4337 +} + // EncodeRLP implements rlp.Encoder func (tx *Transaction) EncodeRLP(w io.Writer) error { if tx.Type() == LegacyTxType { diff --git a/core/types/transaction_conditional.go b/core/types/transaction_conditional.go new file mode 100644 index 0000000000..358303a0b5 --- /dev/null +++ b/core/types/transaction_conditional.go @@ -0,0 +1,146 @@ +package types + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "math/big" + + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +type KnownAccounts map[common.Address]*Value + +type Value struct { + Single *common.Hash + Storage map[common.Hash]common.Hash +} + +func SingleFromHex(hex string) *Value { + return &Value{Single: common.HexToRefHash(hex)} +} + +func FromMap(m map[string]string) *Value { + res := map[common.Hash]common.Hash{} + + for k, v := range m { + res[common.HexToHash(k)] = common.HexToHash(v) + } + + return &Value{Storage: res} +} + +func (v *Value) IsSingle() bool { + return v != nil && v.Single != nil && !v.IsStorage() +} + +func (v *Value) IsStorage() bool { + return v != nil && v.Storage != nil +} + +const EmptyValue = "{}" + +func (v *Value) MarshalJSON() ([]byte, error) { + if v.IsSingle() { + return json.Marshal(v.Single) + } + + if v.IsStorage() { + return json.Marshal(v.Storage) + } + + return []byte(EmptyValue), nil +} + +const hashTypeName = "Hash" + +func (v *Value) UnmarshalJSON(data []byte) error { + if len(data) == 0 { + return nil + } + + var m map[string]json.RawMessage + + err := json.Unmarshal(data, &m) + if err != nil { + // single Hash value case + v.Single = new(common.Hash) + + innerErr := json.Unmarshal(data, v.Single) + if innerErr != nil { + return fmt.Errorf("can't unmarshal to single value with error: %v value %q", innerErr, string(data)) + } + + return nil + } + + res := make(map[common.Hash]common.Hash, len(m)) + + for k, v := range m { + // check k if it is a Hex value + var kHash common.Hash + + err = hexutil.UnmarshalFixedText(hashTypeName, []byte(k), kHash[:]) + if err != nil { + return fmt.Errorf("%w by key: %s with key %q and value %q", ErrKnownAccounts, err, k, string(v)) + } + + // check v if it is a Hex value + var vHash common.Hash + + err = hexutil.UnmarshalFixedText("hashTypeName", bytes.Trim(v, "\""), vHash[:]) + if err != nil { + return fmt.Errorf("%w by value: %s with key %q and value %q", ErrKnownAccounts, err, k, string(v)) + } + + res[kHash] = vHash + } + + v.Storage = res + + return nil +} + +func InsertKnownAccounts[T common.Hash | map[common.Hash]common.Hash](accounts KnownAccounts, k common.Address, v T) { + switch 
typedV := any(v).(type) { + case common.Hash: + accounts[k] = &Value{Single: &typedV} + case map[common.Hash]common.Hash: + accounts[k] = &Value{Storage: typedV} + } +} + +type OptionsAA4337 struct { + KnownAccounts KnownAccounts `json:"knownAccounts"` + BlockNumberMin *big.Int `json:"blockNumberMin"` + BlockNumberMax *big.Int `json:"blockNumberMax"` + TimestampMin *uint64 `json:"timestampMin"` + TimestampMax *uint64 `json:"timestampMax"` +} + +var ErrKnownAccounts = errors.New("an incorrect list of knownAccounts") + +func (ka KnownAccounts) ValidateLength() error { + if ka == nil { + return nil + } + + length := 0 + + for _, v := range ka { + // check if the value is hex string or an object + if v.IsSingle() { + length += 1 + } else { + length += len(v.Storage) + } + } + + if length >= 1000 { + return fmt.Errorf("number of slots/accounts in KnownAccounts %v exceeds the limit of 1000", length) + } + + return nil +} diff --git a/core/types/transaction_conditional_test.go b/core/types/transaction_conditional_test.go new file mode 100644 index 0000000000..03ce473d16 --- /dev/null +++ b/core/types/transaction_conditional_test.go @@ -0,0 +1,31 @@ +package types + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum/go-ethereum/common" +) + +func TestKnownAccounts(t *testing.T) { + t.Parallel() + + requestRaw := []byte(`{"0xadd1add1add1add1add1add1add1add1add1add1": "0x000000000000000000000000313aadca1750caadc7bcb26ff08175c95dcf8e38", "0xadd2add2add2add2add2add2add2add2add2add2": {"0x0000000000000000000000000000000000000000000000000000000000000aaa": "0x0000000000000000000000000000000000000000000000000000000000000bbb", "0x0000000000000000000000000000000000000000000000000000000000000ccc": "0x0000000000000000000000000000000000000000000000000000000000000ddd"}}`) + + accs := &KnownAccounts{} + + err := json.Unmarshal(requestRaw, accs) + require.NoError(t, err) + + expected := &KnownAccounts{ + common.HexToAddress("0xadd1add1add1add1add1add1add1add1add1add1"): SingleFromHex("0x000000000000000000000000313aadca1750caadc7bcb26ff08175c95dcf8e38"), + common.HexToAddress("0xadd2add2add2add2add2add2add2add2add2add2"): FromMap(map[string]string{ + "0x0000000000000000000000000000000000000000000000000000000000000aaa": "0x0000000000000000000000000000000000000000000000000000000000000bbb", + "0x0000000000000000000000000000000000000000000000000000000000000ccc": "0x0000000000000000000000000000000000000000000000000000000000000ddd", + }), + } + + require.Equal(t, expected, accs) +} diff --git a/docs/cli/server.md b/docs/cli/server.md index 213d491fff..865b54e3f0 100644 --- a/docs/cli/server.md +++ b/docs/cli/server.md @@ -126,6 +126,16 @@ The ```bor server``` command runs the Bor client. - ```fdlimit```: Raise the open file descriptor resource limit (default = system fd limit) (default: 0) +### ExtraDB Options + +- ```leveldb.compaction.table.size```: LevelDB SSTable/file size in mebibytes (default: 2) + +- ```leveldb.compaction.table.size.multiplier```: Multiplier on LevelDB SSTable/file size. Size for a level is determined by: `leveldb.compaction.table.size * (leveldb.compaction.table.size.multiplier ^ Level)` (default: 1) + +- ```leveldb.compaction.total.size```: Total size in mebibytes of SSTables in a given LevelDB level. Size for a level is determined by: `leveldb.compaction.total.size * (leveldb.compaction.total.size.multiplier ^ Level)` (default: 10) + +- ```leveldb.compaction.total.size.multiplier```: Multiplier on level size on LevelDB levels. 
Size for a level is determined by: `leveldb.compaction.total.size * (leveldb.compaction.total.size.multiplier ^ Level)` (default: 10) + ### JsonRPC Options - ```rpc.gascap```: Sets a cap on gas that can be used in eth_call/estimateGas (0=infinite) (default: 50000000) diff --git a/eth/api_backend.go b/eth/api_backend.go index 734a100605..3446b448a4 100644 --- a/eth/api_backend.go +++ b/eth/api_backend.go @@ -319,6 +319,7 @@ func (b *EthAPIBackend) SubscribeLogsEvent(ch chan<- []*types.Log) event.Subscri return b.eth.BlockChain().SubscribeLogsEvent(ch) } +// TODO - Arpit func (b *EthAPIBackend) SendTx(ctx context.Context, signedTx *types.Transaction) error { return b.eth.txPool.Add([]*txpool.Transaction{{Tx: signedTx}}, true, false)[0] } diff --git a/eth/api_debug_test.go b/eth/api_debug_test.go index 159512fab0..f133e61271 100644 --- a/eth/api_debug_test.go +++ b/eth/api_debug_test.go @@ -101,11 +101,9 @@ func TestAccountRange(t *testing.T) { if addr1 == (common.Address{}) { continue } - if _, duplicate := secondResult.Accounts[addr1]; duplicate { t.Fatalf("pagination test failed: results should not overlap") } - hList = append(hList, crypto.Keccak256Hash(addr1.Bytes())) } // Test to see if it's possible to recover from the middle of the previous @@ -114,7 +112,6 @@ func TestAccountRange(t *testing.T) { middleH := hList[AccountRangeMaxResults/2] middleResult := accountRangeTest(t, &trie, sdb, middleH, AccountRangeMaxResults, AccountRangeMaxResults) missing, infirst, insecond := 0, 0, 0 - for h := range middleResult.Accounts { if _, ok := firstResult.Accounts[h]; ok { infirst++ @@ -124,15 +121,12 @@ func TestAccountRange(t *testing.T) { missing++ } } - if missing != 0 { t.Fatalf("%d hashes in the 'middle' set were neither in the first not the second set", missing) } - if infirst != AccountRangeMaxResults/2 { t.Fatalf("Imbalance in the number of first-test results: %d != %d", infirst, AccountRangeMaxResults/2) } - if insecond != AccountRangeMaxResults/2 { t.Fatalf("Imbalance in the number of second-test results: %d != %d", insecond, AccountRangeMaxResults/2) } diff --git a/eth/backend.go b/eth/backend.go index f14b7234de..fc1bc323c2 100644 --- a/eth/backend.go +++ b/eth/backend.go @@ -348,6 +348,7 @@ func makeExtraData(extra []byte) []byte { return extra } +// PeerCount returns the number of connected peers. 
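+// It simply reports the count maintained by the node's underlying p2p server.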
func (s *Ethereum) PeerCount() int { return s.p2pServer.PeerCount() } diff --git a/eth/filters/bench_test.go b/eth/filters/bench_test.go index 89a45bc2d8..3e52a6c11b 100644 --- a/eth/filters/bench_test.go +++ b/eth/filters/bench_test.go @@ -68,7 +68,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) { b.Log("Running bloombits benchmark section size:", sectionSize) - db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) + db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false, rawdb.ExtraDBConfig{}) if err != nil { b.Fatalf("error opening database at %v: %v", benchDataDir, err) } @@ -145,7 +145,7 @@ func benchmarkBloomBits(b *testing.B, sectionSize uint64) { for i := 0; i < benchFilterCnt; i++ { if i%20 == 0 { db.Close() - db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) + db, _ = rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false, rawdb.ExtraDBConfig{}) backend = &testBackend{db: db, sections: cnt} sys = NewFilterSystem(backend, Config{}) } @@ -187,7 +187,7 @@ func BenchmarkNoBloomBits(b *testing.B) { b.Log("Running benchmark without bloombits") - db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false) + db, err := rawdb.NewLevelDBDatabase(benchDataDir, 128, 1024, "", false, rawdb.ExtraDBConfig{}) if err != nil { b.Fatalf("error opening database at %v: %v", benchDataDir, err) } diff --git a/eth/filters/filter_test.go b/eth/filters/filter_test.go index ef41615fef..e9a24ccb17 100644 --- a/eth/filters/filter_test.go +++ b/eth/filters/filter_test.go @@ -49,7 +49,7 @@ func makeReceipt(addr common.Address) *types.Receipt { func BenchmarkFilters(b *testing.B) { var ( - db, _ = rawdb.NewLevelDBDatabase(b.TempDir(), 0, 0, "", false) + db, _ = rawdb.NewLevelDBDatabase(b.TempDir(), 0, 0, "", false, rawdb.ExtraDBConfig{}) _, sys = newTestFilterSystem(b, db, Config{}) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr1 = crypto.PubkeyToAddress(key1.PublicKey) @@ -112,9 +112,8 @@ func BenchmarkFilters(b *testing.B) { func TestFilters(t *testing.T) { var ( - db = rawdb.NewMemoryDatabase() - _, sys = newTestFilterSystem(t, db, Config{}) - // Sender account + db, _ = rawdb.NewLevelDBDatabase(t.TempDir(), 0, 0, "", false, rawdb.ExtraDBConfig{}) + _, sys = newTestFilterSystem(t, db, Config{}) key1, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") addr = crypto.PubkeyToAddress(key1.PublicKey) signer = types.NewLondonSigner(big.NewInt(1)) diff --git a/eth/protocols/eth/broadcast.go b/eth/protocols/eth/broadcast.go index e84bfa1c80..993d160f06 100644 --- a/eth/protocols/eth/broadcast.go +++ b/eth/protocols/eth/broadcast.go @@ -84,10 +84,15 @@ func (p *Peer) broadcastTransactions() { ) for i := 0; i < len(queue) && size < maxTxPacketSize; i++ { - if tx := p.txpool.Get(queue[i]); tx != nil { - txs = append(txs, tx.Tx) - size += common.StorageSize(tx.Tx.Size()) - } + + // TODO - Arpit + // tx := p.txpool.Get(queue[i]) + + // Skip EIP-4337 bundled transactions + // if tx != nil && tx.GetOptions() == nil { + // txs = append(txs, tx) + // size += common.StorageSize(tx.Size()) + // } hashesCount++ } @@ -158,12 +163,16 @@ func (p *Peer) announceTransactions() { ) for count = 0; count < len(queue) && size < maxTxPacketSize; count++ { - if tx := p.txpool.Get(queue[count]); tx != nil { - pending = append(pending, queue[count]) - pendingTypes = append(pendingTypes, tx.Tx.Type()) - pendingSizes = append(pendingSizes, uint32(tx.Tx.Size())) - size 
+= common.HashLength - } + // TODO - Arpit + // tx := p.txpool.Get(queue[count]) + + // // Skip EIP-4337 bundled transactions + // if tx != nil && tx.GetOptions() == nil { + // pending = append(pending, queue[count]) + // pendingTypes = append(pendingTypes, tx.Tx.Type()) + // pendingSizes = append(pendingSizes, uint32(tx.Tx.Size())) + // size += common.HashLength + // } } // Shift and trim queue queue = queue[:copy(queue, queue[count:])] diff --git a/ethdb/leveldb/leveldb.go b/ethdb/leveldb/leveldb.go index 9b26e27c08..b85bde50e3 100644 --- a/ethdb/leveldb/leveldb.go +++ b/ethdb/leveldb/leveldb.go @@ -83,9 +83,16 @@ type Database struct { log log.Logger // Contextual logger tracking the database path } +type LevelDBConfig struct { + CompactionTableSize uint64 // LevelDB SSTable/file size in mebibytes + CompactionTableSizeMultiplier float64 // Multiplier on LevelDB SSTable/file size + CompactionTotalSize uint64 // Total size in mebibytes of SSTables in a given LevelDB level + CompactionTotalSizeMultiplier float64 // Multiplier on level size on LevelDB levels +} + // New returns a wrapped LevelDB object. The namespace is the prefix that the // metrics reporting should use for surfacing internal stats. -func New(file string, cache int, handles int, namespace string, readonly bool) (*Database, error) { +func New(file string, cache int, handles int, namespace string, readonly bool, config LevelDBConfig) (*Database, error) { return NewCustom(file, namespace, func(options *opt.Options) { // Ensure we have some minimal caching and file guarantees if cache < minCache { @@ -100,6 +107,22 @@ func New(file string, cache int, handles int, namespace string, readonly bool) ( options.BlockCacheCapacity = cache / 2 * opt.MiB options.WriteBuffer = cache / 4 * opt.MiB // Two of these are used internally + if config.CompactionTableSize != 0 { + options.CompactionTableSize = int(config.CompactionTableSize * opt.MiB) + } + + if config.CompactionTableSizeMultiplier != 0 { + options.CompactionTableSizeMultiplier = config.CompactionTableSizeMultiplier + } + + if config.CompactionTotalSize != 0 { + options.CompactionTotalSize = int(config.CompactionTotalSize * opt.MiB) + } + + if config.CompactionTotalSizeMultiplier != 0 { + options.CompactionTotalSizeMultiplier = config.CompactionTotalSizeMultiplier + } + if readonly { options.ReadOnly = true } @@ -114,7 +137,14 @@ func NewCustom(file string, namespace string, customize func(options *opt.Option logger := log.New("database", file) usedCache := options.GetBlockCacheCapacity() + options.GetWriteBuffer()*2 - logCtx := []interface{}{"cache", common.StorageSize(usedCache), "handles", options.GetOpenFilesCacheCapacity()} + logCtx := []interface{}{ + "cache", common.StorageSize(usedCache), + "handles", options.GetOpenFilesCacheCapacity(), + "compactionTableSize", options.CompactionTableSize, + "compactionTableSizeMultiplier", options.CompactionTableSizeMultiplier, + "compactionTotalSize", options.CompactionTotalSize, + "compactionTotalSizeMultiplier", options.CompactionTotalSizeMultiplier} + if options.ReadOnly { logCtx = append(logCtx, "readonly", "true") } diff --git a/internal/cli/server/chains/mainnet.go b/internal/cli/server/chains/mainnet.go index 6230bbdaab..324288b2b4 100644 --- a/internal/cli/server/chains/mainnet.go +++ b/internal/cli/server/chains/mainnet.go @@ -85,7 +85,7 @@ var mainnetBor = &Chain{ Alloc: readPrealloc("allocs/mainnet.json"), }, Bootnodes: []string{ - 
"enode://0cb82b395094ee4a2915e9714894627de9ed8498fb881cec6db7c65e8b9a5bd7f2f25cc84e71e89d0947e51c76e85d0847de848c7782b13c0255247a6758178c@44.232.55.71:30303", - "enode://88116f4295f5a31538ae409e4d44ad40d22e44ee9342869e7d68bdec55b0f83c1530355ce8b41fbec0928a7d75a5745d528450d30aec92066ab6ba1ee351d710@159.203.9.164:30303", + "enode://b8f1cc9c5d4403703fbf377116469667d2b1823c0daf16b7250aa576bacf399e42c3930ccfcb02c5df6879565a2b8931335565f0e8d3f8e72385ecf4a4bf160a@3.36.224.80:30303", + "enode://8729e0c825f3d9cad382555f3e46dcff21af323e89025a0e6312df541f4a9e73abfa562d64906f5e59c51fe6f0501b3e61b07979606c56329c020ed739910759@54.194.245.5:30303", }, } diff --git a/internal/cli/server/chains/mumbai.go b/internal/cli/server/chains/mumbai.go index 64b9e3aef9..91fe0c440c 100644 --- a/internal/cli/server/chains/mumbai.go +++ b/internal/cli/server/chains/mumbai.go @@ -78,7 +78,7 @@ var mumbaiTestnet = &Chain{ Alloc: readPrealloc("allocs/mumbai.json"), }, Bootnodes: []string{ - "enode://320553cda00dfc003f499a3ce9598029f364fbb3ed1222fdc20a94d97dcc4d8ba0cd0bfa996579dcc6d17a534741fb0a5da303a90579431259150de66b597251@54.147.31.250:30303", - "enode://f0f48a8781629f95ff02606081e6e43e4aebd503f3d07fc931fad7dd5ca1ba52bd849a6f6c3be0e375cf13c9ae04d859c4a9ae3546dc8ed4f10aa5dbb47d4998@34.226.134.117:30303", + "enode://bdcd4786a616a853b8a041f53496d853c68d99d54ff305615cd91c03cd56895e0a7f6e9f35dbf89131044e2114a9a782b792b5661e3aff07faf125a98606a071@43.200.206.40:30303", + "enode://209aaf7ed549cf4a5700fd833da25413f80a1248bd3aa7fe2a87203e3f7b236dd729579e5c8df61c97bf508281bae4969d6de76a7393bcbd04a0af70270333b3@54.216.248.9:30303", }, } diff --git a/internal/cli/server/config.go b/internal/cli/server/config.go index 5f11f55b73..5a33ff3d75 100644 --- a/internal/cli/server/config.go +++ b/internal/cli/server/config.go @@ -121,6 +121,8 @@ type Config struct { // Cache has the cache related settings Cache *CacheConfig `hcl:"cache,block" toml:"cache,block"` + ExtraDB *ExtraDBConfig `hcl:"leveldb,block" toml:"leveldb,block"` + // Account has the validator account related settings Accounts *AccountsConfig `hcl:"accounts,block" toml:"accounts,block"` @@ -551,6 +553,13 @@ type CacheConfig struct { FDLimit int `hcl:"fdlimit,optional" toml:"fdlimit,optional"` } +type ExtraDBConfig struct { + LevelDbCompactionTableSize uint64 `hcl:"compactiontablesize,optional" toml:"compactiontablesize,optional"` + LevelDbCompactionTableSizeMultiplier float64 `hcl:"compactiontablesizemultiplier,optional" toml:"compactiontablesizemultiplier,optional"` + LevelDbCompactionTotalSize uint64 `hcl:"compactiontotalsize,optional" toml:"compactiontotalsize,optional"` + LevelDbCompactionTotalSizeMultiplier float64 `hcl:"compactiontotalsizemultiplier,optional" toml:"compactiontotalsizemultiplier,optional"` +} + type AccountsConfig struct { // Unlock is the list of addresses to unlock in the node Unlock []string `hcl:"unlock,optional" toml:"unlock,optional"` @@ -742,6 +751,14 @@ func DefaultConfig() *Config { TrieTimeout: 60 * time.Minute, FDLimit: 0, }, + ExtraDB: &ExtraDBConfig{ + // These are LevelDB defaults, specifying here for clarity in code and in logging. 
+ // See: https://github.com/syndtr/goleveldb/blob/126854af5e6d8295ef8e8bee3040dd8380ae72e8/leveldb/opt/options.go + LevelDbCompactionTableSize: 2, // MiB + LevelDbCompactionTableSizeMultiplier: 1, + LevelDbCompactionTotalSize: 10, // MiB + LevelDbCompactionTotalSizeMultiplier: 10, + }, Accounts: &AccountsConfig{ Unlock: []string{}, PasswordFile: "", @@ -1099,6 +1116,14 @@ func (c *Config) buildEth(stack *node.Node, accountManager *accounts.Manager) (* n.TriesInMemory = c.Cache.TriesInMemory } + // LevelDB + { + n.LevelDbCompactionTableSize = c.ExtraDB.LevelDbCompactionTableSize + n.LevelDbCompactionTableSizeMultiplier = c.ExtraDB.LevelDbCompactionTableSizeMultiplier + n.LevelDbCompactionTotalSize = c.ExtraDB.LevelDbCompactionTotalSize + n.LevelDbCompactionTotalSizeMultiplier = c.ExtraDB.LevelDbCompactionTotalSizeMultiplier + } + n.RPCGasCap = c.JsonRPC.GasCap if n.RPCGasCap != 0 { log.Info("Set global gas cap", "cap", n.RPCGasCap) diff --git a/internal/cli/server/flags.go b/internal/cli/server/flags.go index 34d3ace94f..1edacc9ac0 100644 --- a/internal/cli/server/flags.go +++ b/internal/cli/server/flags.go @@ -448,6 +448,36 @@ func (c *Command) Flags() *flagset.Flagset { Group: "Cache", }) + // LevelDB options + f.Uint64Flag(&flagset.Uint64Flag{ + Name: "leveldb.compaction.table.size", + Usage: "LevelDB SSTable/file size in mebibytes", + Value: &c.cliConfig.ExtraDB.LevelDbCompactionTableSize, + Default: c.cliConfig.ExtraDB.LevelDbCompactionTableSize, + Group: "ExtraDB", + }) + f.Float64Flag(&flagset.Float64Flag{ + Name: "leveldb.compaction.table.size.multiplier", + Usage: "Multiplier on LevelDB SSTable/file size. Size for a level is determined by: `leveldb.compaction.table.size * (leveldb.compaction.table.size.multiplier ^ Level)`", + Value: &c.cliConfig.ExtraDB.LevelDbCompactionTableSizeMultiplier, + Default: c.cliConfig.ExtraDB.LevelDbCompactionTableSizeMultiplier, + Group: "ExtraDB", + }) + f.Uint64Flag(&flagset.Uint64Flag{ + Name: "leveldb.compaction.total.size", + Usage: "Total size in mebibytes of SSTables in a given LevelDB level. Size for a level is determined by: `leveldb.compaction.total.size * (leveldb.compaction.total.size.multiplier ^ Level)`", + Value: &c.cliConfig.ExtraDB.LevelDbCompactionTotalSize, + Default: c.cliConfig.ExtraDB.LevelDbCompactionTotalSize, + Group: "ExtraDB", + }) + f.Float64Flag(&flagset.Float64Flag{ + Name: "leveldb.compaction.total.size.multiplier", + Usage: "Multiplier on level size on LevelDB levels. Size for a level is determined by: `leveldb.compaction.total.size * (leveldb.compaction.total.size.multiplier ^ Level)`", + Value: &c.cliConfig.ExtraDB.LevelDbCompactionTotalSizeMultiplier, + Default: c.cliConfig.ExtraDB.LevelDbCompactionTotalSizeMultiplier, + Group: "ExtraDB", + }) + // rpc options f.Uint64Flag(&flagset.Uint64Flag{ Name: "rpc.gascap", diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go index 7ef511c490..1298e1371f 100644 --- a/internal/ethapi/api.go +++ b/internal/ethapi/api.go @@ -2165,6 +2165,10 @@ func (s *TransactionAPI) GetTransactionReceipt(ctx context.Context, hash common. "effectiveGasPrice": (*hexutil.Big)(receipt.EffectiveGasPrice), } + if receipt.EffectiveGasPrice == nil { + fields["effectiveGasPrice"] = new(hexutil.Big) + } + // Assign receipt status or post state. 
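[Editorial aside, not part of the diff] On the effectiveGasPrice guard added just above: a nil *hexutil.Big marshals to JSON null, while a zero-valued one marshals to "0x0", which is presumably why the receipt falls back to new(hexutil.Big) when the stored value is unset. A small self-contained demonstration of that encoding difference:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

func main() {
	// A nil *hexutil.Big encodes as JSON null.
	var nilPrice *hexutil.Big
	fields := map[string]interface{}{"effectiveGasPrice": nilPrice}

	out, _ := json.Marshal(fields)
	fmt.Println(string(out)) // {"effectiveGasPrice":null}

	// The fallback used in GetTransactionReceipt above: a zero hexutil.Big encodes as "0x0".
	fields["effectiveGasPrice"] = new(hexutil.Big)
	out, _ = json.Marshal(fields)
	fmt.Println(string(out)) // {"effectiveGasPrice":"0x0"}
}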
if len(receipt.PostState) > 0 { fields["root"] = hexutil.Bytes(receipt.PostState) diff --git a/internal/ethapi/api_test.go b/internal/ethapi/api_test.go index 436e36e6a6..6f8469fbdc 100644 --- a/internal/ethapi/api_test.go +++ b/internal/ethapi/api_test.go @@ -50,6 +50,46 @@ import ( "golang.org/x/exp/slices" ) +func testTransactionMarshal(t *testing.T, tests []txData, config *params.ChainConfig) { + t.Helper() + t.Parallel() + + var ( + signer = types.LatestSigner(config) + key, _ = crypto.HexToECDSA("b71c71a67e1177ad4e901695e1b4b9ee17ae16c6668d313eac2f96dbcda3f291") + ) + + for i, tt := range tests { + var tx2 types.Transaction + + tx, err := types.SignNewTx(key, signer, tt.Tx) + if err != nil { + t.Fatalf("test %d: signing failed: %v", i, err) + } + // Regular transaction + if data, err := json.Marshal(tx); err != nil { + t.Fatalf("test %d: marshalling failed; %v", i, err) + } else if err = tx2.UnmarshalJSON(data); err != nil { + t.Fatalf("test %d: sunmarshal failed: %v", i, err) + } else if want, have := tx.Hash(), tx2.Hash(); want != have { + t.Fatalf("test %d: stx changed, want %x have %x", i, want, have) + } + + // rpcTransaction + rpcTx := newRPCTransaction(tx, common.Hash{}, 0, 0, 0, big.NewInt(0), config) + if data, err := json.Marshal(rpcTx); err != nil { + t.Fatalf("test %d: marshalling failed; %v", i, err) + } else if err = tx2.UnmarshalJSON(data); err != nil { + t.Fatalf("test %d: unmarshal failed: %v", i, err) + } else if want, have := tx.Hash(), tx2.Hash(); want != have { + t.Fatalf("test %d: tx changed, want %x have %x", i, want, have) + } else { + want, have := tt.Want, string(data) + require.JSONEqf(t, want, have, "test %d: rpc json not match, want %s have %s", i, want, have) + } + } +} + func TestTransaction_RoundTripRpcJSON(t *testing.T) { var ( config = params.AllEthashProtocolChanges @@ -58,7 +98,7 @@ func TestTransaction_RoundTripRpcJSON(t *testing.T) { tests = allTransactionTypes(common.Address{0xde, 0xad}, config) ) - t.Parallel() + testTransactionMarshal(t, tests, config) for i, tt := range tests { var tx2 types.Transaction diff --git a/internal/ethapi/backend.go b/internal/ethapi/backend.go index 2033fc9123..706be22e60 100644 --- a/internal/ethapi/backend.go +++ b/internal/ethapi/backend.go @@ -139,6 +139,9 @@ func GetAPIs(apiBackend Backend) []rpc.API { }, { Namespace: "personal", Service: NewPersonalAccountAPI(apiBackend, nonceLock), + }, { + Namespace: "bor", + Service: NewBorAPI(apiBackend), }, } } diff --git a/internal/ethapi/bor_api.go b/internal/ethapi/bor_api.go index cf1bdbcb26..ce452dff5b 100644 --- a/internal/ethapi/bor_api.go +++ b/internal/ethapi/bor_api.go @@ -4,7 +4,9 @@ import ( "context" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rpc" ) // GetRootHash returns root hash for given start and end block @@ -51,3 +53,54 @@ func (s *BlockChainAPI) appendRPCMarshalBorTransaction(ctx context.Context, bloc return fields } + +// BorAPI provides an API to access Bor related information. +type BorAPI struct { + b Backend +} + +// NewBorAPI creates a new Bor protocol API. +func NewBorAPI(b Backend) *BorAPI { + return &BorAPI{b} +} + +// SendRawTransactionConditional will add the signed transaction to the transaction pool. 
+// The sender/bundler is responsible for signing the transaction +func (api *BorAPI) SendRawTransactionConditional(ctx context.Context, input hexutil.Bytes, options types.OptionsAA4337) (common.Hash, error) { + tx := new(types.Transaction) + if err := tx.UnmarshalBinary(input); err != nil { + return common.Hash{}, err + } + + currentHeader := api.b.CurrentHeader() + currentState, _, _ := api.b.StateAndHeaderByNumber(ctx, rpc.BlockNumber(currentHeader.Number.Int64())) + + // check block number range + if err := currentHeader.ValidateBlockNumberOptions4337(options.BlockNumberMin, options.BlockNumberMax); err != nil { + return common.Hash{}, &rpc.OptionsValidateError{Message: "out of block range. err: " + err.Error()} + } + + // check timestamp range + if err := currentHeader.ValidateTimestampOptions4337(options.TimestampMin, options.TimestampMax); err != nil { + return common.Hash{}, &rpc.OptionsValidateError{Message: "out of time range. err: " + err.Error()} + } + + // check knownAccounts length (number of slots/accounts) should be less than 1000 + if err := options.KnownAccounts.ValidateLength(); err != nil { + return common.Hash{}, &rpc.KnownAccountsLimitExceededError{Message: "limit exceeded. err: " + err.Error()} + } + + // check knownAccounts + if err := currentState.ValidateKnownAccounts(options.KnownAccounts); err != nil { + return common.Hash{}, &rpc.OptionsValidateError{Message: "storage error. err: " + err.Error()} + } + + // put options data in Tx, to use it later while block building + tx.PutOptions(&options) + + return SubmitTransaction(ctx, api.b, tx) +} + +func (api *BorAPI) GetVoteOnHash(ctx context.Context, starBlockNr uint64, endBlockNr uint64, hash string, milestoneId string) (bool, error) { + return api.b.GetVoteOnHash(ctx, starBlockNr, endBlockNr, hash, milestoneId) +} diff --git a/internal/jsre/deps/web3.js b/internal/jsre/deps/web3.js index fa66d950b0..a8ca37cec5 100644 --- a/internal/jsre/deps/web3.js +++ b/internal/jsre/deps/web3.js @@ -5383,6 +5383,13 @@ var methods = function () { inputFormatter: [null] }); + var sendRawTransactionConditional = new Method({ + name: 'sendRawTransactionConditional', + call: 'eth_sendRawTransactionConditional', + params: 2, + inputFormatter: [null] + }); + var sendTransaction = new Method({ name: 'sendTransaction', call: 'eth_sendTransaction', @@ -5471,6 +5478,7 @@ var methods = function () { call, estimateGas, sendRawTransaction, + sendRawTransactionConditional, signTransaction, sendTransaction, sign, diff --git a/internal/web3ext/bor_ext.go b/internal/web3ext/bor_ext.go index 2f2f2acdbc..c8236dbcac 100644 --- a/internal/web3ext/bor_ext.go +++ b/internal/web3ext/bor_ext.go @@ -65,6 +65,12 @@ web3._extend({ call: 'bor_getVoteOnHash', params: 4, }), + new web3._extend.Method({ + name: 'sendRawTransactionConditional', + call: 'bor_sendRawTransactionConditional', + params: 2, + inputFormatter: [null] + }), ] }); ` diff --git a/miner/miner.go b/miner/miner.go index d293dd7926..cfc85ebfcb 100644 --- a/miner/miner.go +++ b/miner/miner.go @@ -101,6 +101,10 @@ func New(eth Backend, config *Config, chainConfig *params.ChainConfig, mux *even return miner } +func (miner *Miner) GetWorker() *worker { + return miner.worker +} + // update keeps track of the downloader events. Please be aware that this is a one shot type of update loop. // It's entered once and as soon as `Done` or `Failed` has been broadcasted the events are unregistered and // the loop is exited. 
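[Editorial aside, not part of the diff] Stepping back to the new bor_sendRawTransactionConditional endpoint defined above: a hedged sketch of how a bundler might invoke it with the Go RPC client. The node URL is a placeholder, and the JSON shape of the options object (knownAccounts plus optional block-number/timestamp bounds) is inferred from the validation calls in the handler, not confirmed by this diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	client, err := rpc.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// rawTx would be the RLP-encoded, signed transaction produced by the bundler.
	var rawTx hexutil.Bytes

	// Options object; the field name mirrors the knownAccounts check in the handler
	// (at most 1000 slots/accounts). Block-number and timestamp bounds could be added
	// here as well, with encodings matching types.OptionsAA4337.
	options := map[string]interface{}{
		"knownAccounts": map[string]interface{}{},
	}

	var txHash common.Hash
	err = client.CallContext(context.Background(), &txHash, "bor_sendRawTransactionConditional", rawTx, options)
	if err != nil {
		// Validation failures surface as the new rpc error codes added later in this patch:
		// -32003 (options validation) and -32005 (knownAccounts limit exceeded).
		log.Fatal(err)
	}

	fmt.Println("submitted conditional tx:", txHash)
}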
This to prevent a major security vuln where external parties can DOS you with blocks @@ -194,7 +198,7 @@ func (miner *Miner) Close() { } func (miner *Miner) Mining() bool { - return miner.worker.isRunning() + return miner.worker.IsRunning() } func (miner *Miner) Hashrate() uint64 { diff --git a/miner/test_backend.go b/miner/test_backend.go index 66bff4d9f1..afab55b73e 100644 --- a/miner/test_backend.go +++ b/miner/test_backend.go @@ -417,7 +417,7 @@ func (w *worker) mainLoopWithDelay(ctx context.Context, delay uint, opcodeDelay // Note all transactions received may not be continuous with transactions // already included in the current sealing block. These transactions will // be automatically eliminated. - if !w.isRunning() && w.current != nil { + if !w.IsRunning() && w.current != nil { // If block is already full, abort if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas { continue @@ -478,7 +478,7 @@ func (w *worker) commitWorkWithDelay(ctx context.Context, interrupt *int32, noem tracing.Exec(ctx, "", "worker.prepareWork", func(ctx context.Context, span trace.Span) { // Set the coinbase if the worker is running or it's required var coinbase common.Address - if w.isRunning() { + if w.IsRunning() { if w.coinbase == (common.Address{}) { log.Error("Refusing to mine without etherbase") return @@ -861,7 +861,7 @@ mainloop: } } - if !w.isRunning() && len(coalescedLogs) > 0 { + if !w.IsRunning() && len(coalescedLogs) > 0 { // We don't push the pendingLogsEvent while we are sealing. The reason is that // when we are sealing, the worker will regenerate a sealing block every 3 seconds. // In order to avoid pushing the repeated pendingLog, we disable the pending log pushing. diff --git a/miner/worker.go b/miner/worker.go index 2690fd5e7b..1ef5c97f9c 100644 --- a/miner/worker.go +++ b/miner/worker.go @@ -453,7 +453,7 @@ func (w *worker) stop() { } // isRunning returns an indicator whether worker is running or not. -func (w *worker) isRunning() bool { +func (w *worker) IsRunning() bool { return w.running.Load() } @@ -551,7 +551,7 @@ func (w *worker) newWorkLoop(ctx context.Context, recommit time.Duration) { case <-timer.C: // If sealing is running resubmit a new work cycle periodically to pull in // higher priced transactions. Disable this overhead for pending blocks. - if w.isRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) { + if w.IsRunning() && (w.chainConfig.Clique == nil || w.chainConfig.Clique.Period > 0) { // Short circuit if no new transaction arrives. if w.newTxs.Load() == 0 { timer.Reset(recommit) @@ -638,7 +638,8 @@ func (w *worker) mainLoop(ctx context.Context) { // Note all transactions received may not be continuous with transactions // already included in the current sealing block. These transactions will // be automatically eliminated. - if !w.isRunning() && w.current != nil { + // nolint : nestif + if !w.IsRunning() && w.current != nil { // If block is already full, abort if gp := w.current.gasPool; gp != nil && gp.Gas() < params.TxGas { continue @@ -1026,6 +1027,32 @@ mainloop: // during transaction acceptance is the transaction pool. from, _ := types.Sender(env.signer, tx.Tx) + // TODO - Arpit + // not prioritising conditional transaction, yet. 
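[Editorial aside, not part of the diff] The commented-out block below is the prioritisation the TODO refers to: conditional transactions whose bounds no longer match the block being built would be dropped during sealing. For reference, a minimal self-contained stand-in for those range checks; the real helpers are Header.ValidateBlockNumberOptions4337 and ValidateTimestampOptions4337, and checkRange here is a hypothetical name that only mirrors their apparent nil-means-unbounded intent.

package main

import (
	"errors"
	"fmt"
	"math/big"
)

// checkRange keeps a conditional transaction valid only while current lies within
// [lower, upper]; a nil bound is treated as unbounded on that side.
func checkRange(current, lower, upper *big.Int) error {
	if lower != nil && current.Cmp(lower) < 0 {
		return errors.New("below minimum")
	}
	if upper != nil && current.Cmp(upper) > 0 {
		return errors.New("above maximum")
	}
	return nil
}

func main() {
	head := big.NewInt(45_000_000)

	if err := checkRange(head, big.NewInt(44_999_000), big.NewInt(45_000_100)); err != nil {
		fmt.Println("drop conditional tx:", err)
		return
	}

	fmt.Println("conditional tx still valid at this head")
}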
+ //nolint:nestif + // if options := tx.GetOptions(); options != nil { + // if err := env.header.ValidateBlockNumberOptions4337(options.BlockNumberMin, options.BlockNumberMax); err != nil { + // log.Trace("Dropping conditional transaction", "from", from, "hash", tx.Hash(), "reason", err) + // txs.Pop() + + // continue + // } + + // if err := env.header.ValidateTimestampOptions4337(options.TimestampMin, options.TimestampMax); err != nil { + // log.Trace("Dropping conditional transaction", "from", from, "hash", tx.Hash(), "reason", err) + // txs.Pop() + + // continue + // } + + // if err := env.state.ValidateKnownAccounts(options.KnownAccounts); err != nil { + // log.Trace("Dropping conditional transaction", "from", from, "hash", tx.Hash(), "reason", err) + // txs.Pop() + + // continue + // } + // } + // Check whether the tx is replay protected. If we're not in the EIP155 hf // phase, start ignoring the sender until we do. if tx.Tx.Protected() && !w.chainConfig.IsEIP155(env.header.Number) { @@ -1091,7 +1118,7 @@ mainloop: } // nolint:nestif - if EnableMVHashMap && w.isRunning() { + if EnableMVHashMap && w.IsRunning() { close(chDeps) depsWg.Wait() @@ -1152,7 +1179,7 @@ mainloop: } - if !w.isRunning() && len(coalescedLogs) > 0 { + if !w.IsRunning() && len(coalescedLogs) > 0 { // We don't push the pendingLogsEvent while we are sealing. The reason is that // when we are sealing, the worker will regenerate a sealing block every 3 seconds. // In order to avoid pushing the repeated pendingLog, we disable the pending log pushing. @@ -1548,7 +1575,7 @@ func (w *worker) commitWork(ctx context.Context, interrupt *atomic.Int32, noempt tracing.Exec(ctx, "", "worker.prepareWork", func(ctx context.Context, span trace.Span) { // Set the coinbase if the worker is running or it's required var coinbase common.Address - if w.isRunning() { + if w.IsRunning() { coinbase = w.etherbase() if coinbase == (common.Address{}) { log.Error("Refusing to mine without etherbase") @@ -1663,7 +1690,7 @@ func getInterruptTimer(ctx context.Context, work *environment, current *types.Bl // Note the assumption is held that the mutation is allowed to the passed env, do // the deep copy first. func (w *worker) commit(ctx context.Context, env *environment, interval func(), update bool, start time.Time) error { - if w.isRunning() { + if w.IsRunning() { ctx, span := tracing.StartSpan(ctx, "commit") defer tracing.EndSpan(span) diff --git a/params/bootnodes.go b/params/bootnodes.go index 8312e48321..100a4b29be 100644 --- a/params/bootnodes.go +++ b/params/bootnodes.go @@ -63,15 +63,15 @@ var GoerliBootnodes = []string{ // MumbaiBootnodes are the enode URLs of the P2P bootstrap nodes running on the // Mumbai test network. var MumbaiBootnodes = []string{ - "enode://320553cda00dfc003f499a3ce9598029f364fbb3ed1222fdc20a94d97dcc4d8ba0cd0bfa996579dcc6d17a534741fb0a5da303a90579431259150de66b597251@54.147.31.250:30303", - "enode://f0f48a8781629f95ff02606081e6e43e4aebd503f3d07fc931fad7dd5ca1ba52bd849a6f6c3be0e375cf13c9ae04d859c4a9ae3546dc8ed4f10aa5dbb47d4998@34.226.134.117:30303", + "enode://bdcd4786a616a853b8a041f53496d853c68d99d54ff305615cd91c03cd56895e0a7f6e9f35dbf89131044e2114a9a782b792b5661e3aff07faf125a98606a071@43.200.206.40:30303", + "enode://209aaf7ed549cf4a5700fd833da25413f80a1248bd3aa7fe2a87203e3f7b236dd729579e5c8df61c97bf508281bae4969d6de76a7393bcbd04a0af70270333b3@54.216.248.9:30303", } // BorMainnetBootnodes are the enode URLs of the P2P bootstrap nodes running on the // main Bor network. 
var BorMainnetBootnodes = []string{ - "enode://0cb82b395094ee4a2915e9714894627de9ed8498fb881cec6db7c65e8b9a5bd7f2f25cc84e71e89d0947e51c76e85d0847de848c7782b13c0255247a6758178c@44.232.55.71:30303", - "enode://88116f4295f5a31538ae409e4d44ad40d22e44ee9342869e7d68bdec55b0f83c1530355ce8b41fbec0928a7d75a5745d528450d30aec92066ab6ba1ee351d710@159.203.9.164:30303", + "enode://b8f1cc9c5d4403703fbf377116469667d2b1823c0daf16b7250aa576bacf399e42c3930ccfcb02c5df6879565a2b8931335565f0e8d3f8e72385ecf4a4bf160a@3.36.224.80:30303", + "enode://8729e0c825f3d9cad382555f3e46dcff21af323e89025a0e6312df541f4a9e73abfa562d64906f5e59c51fe6f0501b3e61b07979606c56329c020ed739910759@54.194.245.5:30303", } var KilnBootnodes = []string{ diff --git a/rpc/errors.go b/rpc/errors.go index 50d784fa10..3435d97a28 100644 --- a/rpc/errors.go +++ b/rpc/errors.go @@ -165,3 +165,15 @@ type CustomError struct { func (e *CustomError) ErrorCode() int { return e.Code } func (e *CustomError) Error() string { return e.ValidationError } + +type OptionsValidateError struct{ Message string } + +func (e *OptionsValidateError) ErrorCode() int { return -32003 } + +func (e *OptionsValidateError) Error() string { return e.Message } + +type KnownAccountsLimitExceededError struct{ Message string } + +func (e *KnownAccountsLimitExceededError) ErrorCode() int { return -32005 } + +func (e *KnownAccountsLimitExceededError) Error() string { return e.Message } diff --git a/rpc/handler.go b/rpc/handler.go index 716b2d8657..608c3589cd 100644 --- a/rpc/handler.go +++ b/rpc/handler.go @@ -404,6 +404,7 @@ func (h *handler) startCallProc(fn func(*callProc)) { h.executionPool.Submit(context.Background(), func() error { defer h.callWG.Done() defer cancel() + fn(&callProc{ctx: ctx}) h.executionPool.processed.Add(1) diff --git a/rpc/ipc.go b/rpc/ipc.go index 31ea91aef9..03498d3cda 100644 --- a/rpc/ipc.go +++ b/rpc/ipc.go @@ -30,6 +30,7 @@ func (s *Server) ServeListener(l net.Listener) error { conn, err := l.Accept() if netutil.IsTemporaryError(err) { log.Warn("RPC accept error", "err", err) + continue } else if err != nil { return err
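[Editorial aside, not part of the diff] On the rpc/ipc.go change above: without the added continue, a temporary accept error would fall through with a nil connection to the code that serves it. A self-contained sketch of the corrected accept-loop pattern; serveListener and the handler closure are illustrative stand-ins, not the actual Server.ServeListener/ServeCodec wiring.

package main

import (
	"log"
	"net"

	"github.com/ethereum/go-ethereum/p2p/netutil"
)

// serveListener retries on temporary accept errors and stops on permanent ones,
// mirroring the control flow of the fixed loop.
func serveListener(l net.Listener, handle func(net.Conn)) error {
	for {
		conn, err := l.Accept()
		if netutil.IsTemporaryError(err) {
			log.Println("accept error (temporary), retrying:", err)
			continue // without this, the nil conn would be used below
		} else if err != nil {
			return err // permanent error: stop serving
		}
		go handle(conn)
	}
}

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	log.Println("listening on", l.Addr())

	log.Fatal(serveListener(l, func(c net.Conn) { c.Close() }))
}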