From 2f72a6c1d0e75936237b8b3de431dcc4d011e665 Mon Sep 17 00:00:00 2001 From: Kaloyan Tanev Date: Mon, 9 Dec 2024 16:44:38 +0200 Subject: [PATCH] Version cut v1.2.0 --- .../version-v1.2.0/adv/_category_.json | 7 + .../adv/advanced/_category_.json | 5 + .../adv/advanced/adv-docker-configs.md | 38 + .../adv/advanced/quickstart-combine.md | 112 +++ .../adv/advanced/quickstart-sdk.mdx | 129 ++++ .../adv/advanced/quickstart-split.md | 104 +++ .../version-v1.2.0/adv/advanced/self-relay.md | 38 + .../adv/security/_category_.json | 5 + .../version-v1.2.0/adv/security/bug-bounty.md | 180 +++++ .../version-v1.2.0/adv/security/contact.md | 10 + .../adv/security/ev-assessment.md | 295 +++++++ .../version-v1.2.0/adv/security/overview.md | 40 + .../version-v1.2.0/adv/security/risks.md | 45 ++ .../adv/security/smart_contract_audit.mdx | 495 ++++++++++++ .../adv/security/threat_model.md | 155 ++++ .../adv/troubleshooting/_category_.json | 5 + .../troubleshooting/client_configurations.md | 82 ++ .../adv/troubleshooting/dkg_failure.md | 86 +++ .../adv/troubleshooting/errors.mdx | 694 +++++++++++++++++ .../adv/troubleshooting/test_command.md | 161 ++++ .../version-v1.2.0/gov/_category_.json | 7 + .../gov/community/_category_.json | 5 + .../gov/community/staking-masters.md | 26 + .../version-v1.2.0/gov/community/techne.md | 119 +++ .../gov/contribution/_category_.json | 5 + .../gov/contribution/bug-report.md | 60 ++ .../version-v1.2.0/gov/contribution/docs.md | 253 ++++++ .../gov/contribution/feedback.md | 12 + .../gov/governance/_category_.json | 5 + .../gov/governance/collective.md | 54 ++ .../gov/governance/contributions.md | 92 +++ .../gov/governance/delegate-guide.md | 57 ++ .../gov/governance/obol-token.md | 24 + .../version-v1.2.0/gov/governance/raf.md | 67 ++ .../gov/governance/token-house.md | 79 ++ .../version-v1.2.0/guides/_category_.json | 7 + .../guides/walkthroughs/_category_.json | 5 + .../guides/walkthroughs/walkthrough-guides.md | 10 + 
.../version-v1.2.0/learn/_category_.json | 7 + .../learn/charon/_category_.json | 5 + .../learn/charon/charon-cli-reference.md | 704 +++++++++++++++++ .../learn/charon/cluster-configuration.md | 163 ++++ .../version-v1.2.0/learn/charon/dkg.md | 73 ++ .../version-v1.2.0/learn/charon/intro.md | 89 +++ .../learn/charon/networking.mdx | 102 +++ .../learn/futher-reading/_category_.json | 5 + .../learn/futher-reading/ethereum_and_dvt.md | 54 ++ .../learn/futher-reading/peer_score.md | 47 ++ .../learn/futher-reading/resources.md | 54 ++ .../learn/futher-reading/testnet.md | 189 +++++ .../learn/intro/_category_.json | 5 + .../version-v1.2.0/learn/intro/faq.mdx | 189 +++++ .../learn/intro/key-concepts.md | 110 +++ .../version-v1.2.0/learn/intro/launchpad.md | 27 + .../learn/intro/obol-collective.md | 34 + .../learn/intro/obol-splits.mdx | 100 +++ .../learn/intro/obol-vs-others.md | 60 ++ .../learn/intro/staking-stack.md | 33 + .../version-v1.2.0/run/_category_.json | 7 + .../run/integrations/Dappnode.mdx | 32 + .../run/integrations/_category_.json | 5 + .../run/integrations/lido-csm.md | 232 ++++++ .../run/integrations/quickstart-eigenpod.mdx | 66 ++ .../run/prepare/_category_.json | 5 + .../run/prepare/deployment-best-practices.md | 101 +++ .../run/prepare/how_where_DVs.md | 49 ++ .../run/prepare/test-command.mdx | 668 ++++++++++++++++ .../run/running/_category_.json | 5 + .../run/running/activate-dv.mdx | 37 + .../version-v1.2.0/run/running/monitoring.md | 90 +++ .../run/running/quickstart-exit.mdx | 302 ++++++++ .../version-v1.2.0/run/running/update.mdx | 85 +++ .../version-v1.2.0/run/start/_category_.json | 5 + .../run/start/obol-monitoring.md | 57 ++ .../run/start/quickstart-builder-api.mdx | 165 ++++ .../run/start/quickstart_alone.mdx | 199 +++++ .../run/start/quickstart_group.mdx | 717 ++++++++++++++++++ .../run/start/quickstart_overview.md | 19 + .../version-v1.2.0/sdk/_category_.json | 6 + .../version-v1.2.0/sdk/classes/Client.md | 247 ++++++ 
.../sdk/enumerations/FORK_MAPPING.md | 10 + .../sdk/functions/validateClusterLock.md | 26 + versioned_docs/version-v1.2.0/sdk/index.md | 90 +++ .../sdk/interfaces/ClusterDefinition.md | 24 + .../sdk/interfaces/RewardsSplitPayload.md | 17 + .../sdk/type-aliases/BuilderRegistration.md | 14 + .../BuilderRegistrationMessage.md | 16 + .../sdk/type-aliases/ClusterCreator.md | 14 + .../sdk/type-aliases/ClusterLock.md | 17 + .../sdk/type-aliases/ClusterOperator.md | 18 + .../sdk/type-aliases/ClusterPayload.md | 16 + .../sdk/type-aliases/ClusterValidator.md | 14 + .../sdk/type-aliases/DepositData.md | 17 + .../sdk/type-aliases/DistributedValidator.md | 17 + .../sdk/type-aliases/ETH_ADDRESS.md | 7 + .../sdk/type-aliases/OperatorPayload.md | 7 + .../sdk/type-aliases/SplitRecipient.md | 14 + .../sdk/type-aliases/TotalSplitPayload.md | 16 + .../version-v1.2.0-sidebars.json | 127 ++++ versions.json | 1 + 100 files changed, 9204 insertions(+) create mode 100644 versioned_docs/version-v1.2.0/adv/_category_.json create mode 100644 versioned_docs/version-v1.2.0/adv/advanced/_category_.json create mode 100644 versioned_docs/version-v1.2.0/adv/advanced/adv-docker-configs.md create mode 100644 versioned_docs/version-v1.2.0/adv/advanced/quickstart-combine.md create mode 100644 versioned_docs/version-v1.2.0/adv/advanced/quickstart-sdk.mdx create mode 100644 versioned_docs/version-v1.2.0/adv/advanced/quickstart-split.md create mode 100644 versioned_docs/version-v1.2.0/adv/advanced/self-relay.md create mode 100644 versioned_docs/version-v1.2.0/adv/security/_category_.json create mode 100644 versioned_docs/version-v1.2.0/adv/security/bug-bounty.md create mode 100644 versioned_docs/version-v1.2.0/adv/security/contact.md create mode 100644 versioned_docs/version-v1.2.0/adv/security/ev-assessment.md create mode 100644 versioned_docs/version-v1.2.0/adv/security/overview.md create mode 100644 versioned_docs/version-v1.2.0/adv/security/risks.md create mode 100644 
versioned_docs/version-v1.2.0/adv/security/smart_contract_audit.mdx create mode 100644 versioned_docs/version-v1.2.0/adv/security/threat_model.md create mode 100644 versioned_docs/version-v1.2.0/adv/troubleshooting/_category_.json create mode 100644 versioned_docs/version-v1.2.0/adv/troubleshooting/client_configurations.md create mode 100644 versioned_docs/version-v1.2.0/adv/troubleshooting/dkg_failure.md create mode 100644 versioned_docs/version-v1.2.0/adv/troubleshooting/errors.mdx create mode 100644 versioned_docs/version-v1.2.0/adv/troubleshooting/test_command.md create mode 100644 versioned_docs/version-v1.2.0/gov/_category_.json create mode 100644 versioned_docs/version-v1.2.0/gov/community/_category_.json create mode 100644 versioned_docs/version-v1.2.0/gov/community/staking-masters.md create mode 100644 versioned_docs/version-v1.2.0/gov/community/techne.md create mode 100644 versioned_docs/version-v1.2.0/gov/contribution/_category_.json create mode 100644 versioned_docs/version-v1.2.0/gov/contribution/bug-report.md create mode 100644 versioned_docs/version-v1.2.0/gov/contribution/docs.md create mode 100644 versioned_docs/version-v1.2.0/gov/contribution/feedback.md create mode 100644 versioned_docs/version-v1.2.0/gov/governance/_category_.json create mode 100644 versioned_docs/version-v1.2.0/gov/governance/collective.md create mode 100644 versioned_docs/version-v1.2.0/gov/governance/contributions.md create mode 100644 versioned_docs/version-v1.2.0/gov/governance/delegate-guide.md create mode 100644 versioned_docs/version-v1.2.0/gov/governance/obol-token.md create mode 100644 versioned_docs/version-v1.2.0/gov/governance/raf.md create mode 100644 versioned_docs/version-v1.2.0/gov/governance/token-house.md create mode 100644 versioned_docs/version-v1.2.0/guides/_category_.json create mode 100644 versioned_docs/version-v1.2.0/guides/walkthroughs/_category_.json create mode 100644 versioned_docs/version-v1.2.0/guides/walkthroughs/walkthrough-guides.md create mode 
100644 versioned_docs/version-v1.2.0/learn/_category_.json create mode 100644 versioned_docs/version-v1.2.0/learn/charon/_category_.json create mode 100644 versioned_docs/version-v1.2.0/learn/charon/charon-cli-reference.md create mode 100644 versioned_docs/version-v1.2.0/learn/charon/cluster-configuration.md create mode 100644 versioned_docs/version-v1.2.0/learn/charon/dkg.md create mode 100644 versioned_docs/version-v1.2.0/learn/charon/intro.md create mode 100644 versioned_docs/version-v1.2.0/learn/charon/networking.mdx create mode 100644 versioned_docs/version-v1.2.0/learn/futher-reading/_category_.json create mode 100644 versioned_docs/version-v1.2.0/learn/futher-reading/ethereum_and_dvt.md create mode 100644 versioned_docs/version-v1.2.0/learn/futher-reading/peer_score.md create mode 100644 versioned_docs/version-v1.2.0/learn/futher-reading/resources.md create mode 100644 versioned_docs/version-v1.2.0/learn/futher-reading/testnet.md create mode 100644 versioned_docs/version-v1.2.0/learn/intro/_category_.json create mode 100644 versioned_docs/version-v1.2.0/learn/intro/faq.mdx create mode 100644 versioned_docs/version-v1.2.0/learn/intro/key-concepts.md create mode 100644 versioned_docs/version-v1.2.0/learn/intro/launchpad.md create mode 100644 versioned_docs/version-v1.2.0/learn/intro/obol-collective.md create mode 100644 versioned_docs/version-v1.2.0/learn/intro/obol-splits.mdx create mode 100644 versioned_docs/version-v1.2.0/learn/intro/obol-vs-others.md create mode 100644 versioned_docs/version-v1.2.0/learn/intro/staking-stack.md create mode 100644 versioned_docs/version-v1.2.0/run/_category_.json create mode 100644 versioned_docs/version-v1.2.0/run/integrations/Dappnode.mdx create mode 100644 versioned_docs/version-v1.2.0/run/integrations/_category_.json create mode 100644 versioned_docs/version-v1.2.0/run/integrations/lido-csm.md create mode 100644 versioned_docs/version-v1.2.0/run/integrations/quickstart-eigenpod.mdx create mode 100644 
versioned_docs/version-v1.2.0/run/prepare/_category_.json create mode 100644 versioned_docs/version-v1.2.0/run/prepare/deployment-best-practices.md create mode 100644 versioned_docs/version-v1.2.0/run/prepare/how_where_DVs.md create mode 100644 versioned_docs/version-v1.2.0/run/prepare/test-command.mdx create mode 100644 versioned_docs/version-v1.2.0/run/running/_category_.json create mode 100644 versioned_docs/version-v1.2.0/run/running/activate-dv.mdx create mode 100644 versioned_docs/version-v1.2.0/run/running/monitoring.md create mode 100644 versioned_docs/version-v1.2.0/run/running/quickstart-exit.mdx create mode 100644 versioned_docs/version-v1.2.0/run/running/update.mdx create mode 100644 versioned_docs/version-v1.2.0/run/start/_category_.json create mode 100644 versioned_docs/version-v1.2.0/run/start/obol-monitoring.md create mode 100644 versioned_docs/version-v1.2.0/run/start/quickstart-builder-api.mdx create mode 100644 versioned_docs/version-v1.2.0/run/start/quickstart_alone.mdx create mode 100644 versioned_docs/version-v1.2.0/run/start/quickstart_group.mdx create mode 100644 versioned_docs/version-v1.2.0/run/start/quickstart_overview.md create mode 100644 versioned_docs/version-v1.2.0/sdk/_category_.json create mode 100644 versioned_docs/version-v1.2.0/sdk/classes/Client.md create mode 100644 versioned_docs/version-v1.2.0/sdk/enumerations/FORK_MAPPING.md create mode 100644 versioned_docs/version-v1.2.0/sdk/functions/validateClusterLock.md create mode 100644 versioned_docs/version-v1.2.0/sdk/index.md create mode 100644 versioned_docs/version-v1.2.0/sdk/interfaces/ClusterDefinition.md create mode 100644 versioned_docs/version-v1.2.0/sdk/interfaces/RewardsSplitPayload.md create mode 100644 versioned_docs/version-v1.2.0/sdk/type-aliases/BuilderRegistration.md create mode 100644 versioned_docs/version-v1.2.0/sdk/type-aliases/BuilderRegistrationMessage.md create mode 100644 versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterCreator.md create mode 100644 
versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterLock.md create mode 100644 versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterOperator.md create mode 100644 versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterPayload.md create mode 100644 versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterValidator.md create mode 100644 versioned_docs/version-v1.2.0/sdk/type-aliases/DepositData.md create mode 100644 versioned_docs/version-v1.2.0/sdk/type-aliases/DistributedValidator.md create mode 100644 versioned_docs/version-v1.2.0/sdk/type-aliases/ETH_ADDRESS.md create mode 100644 versioned_docs/version-v1.2.0/sdk/type-aliases/OperatorPayload.md create mode 100644 versioned_docs/version-v1.2.0/sdk/type-aliases/SplitRecipient.md create mode 100644 versioned_docs/version-v1.2.0/sdk/type-aliases/TotalSplitPayload.md create mode 100644 versioned_sidebars/version-v1.2.0-sidebars.json diff --git a/versioned_docs/version-v1.2.0/adv/_category_.json b/versioned_docs/version-v1.2.0/adv/_category_.json new file mode 100644 index 0000000000..55aa8078c3 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/_category_.json @@ -0,0 +1,7 @@ +{ + "label": "ADVANCED & TROUBLESHOOTING", + "position": 3, + "collapsed": false, + "collapsible": false, + "className": "menuSection" +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/adv/advanced/_category_.json b/versioned_docs/version-v1.2.0/adv/advanced/_category_.json new file mode 100644 index 0000000000..65479986d0 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/advanced/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Advanced Guides", + "position": 1, + "collapsed": true +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/adv/advanced/adv-docker-configs.md b/versioned_docs/version-v1.2.0/adv/advanced/adv-docker-configs.md new file mode 100644 index 0000000000..826b224b39 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/advanced/adv-docker-configs.md @@ -0,0 +1,38 @@ +--- 
+sidebar_position: 6 +description: Use advanced docker-compose features to have more flexibility and power to change the default configuration. +--- + +# Advanced Docker Configs + +:::info +This section is intended for *docker power users*, i.e.: for those who are familiar with working with `docker compose` and want to have more flexibility and power to change the default configuration. +::: + +We use the "Multiple Compose File" feature which provides a very powerful way to override any configuration in `docker-compose.yml` without needing to modify git-checked-in files since that results in conflicts when upgrading this repo. +See [this](https://docs.docker.com/compose/extends/#multiple-compose-files) for more details. + +There are some additional compose files in [this repository](https://github.com/ObolNetwork/charon-distributed-validator-node/), `compose-debug.yml` and `docker-compose.override.yml.sample`, along-with the default `docker-compose.yml` file that you can use for this purpose. + +- `compose-debug.yml` contains some additional containers that developers can use for debugging, like `jaeger`. To achieve this, you can run: + +```shell +docker compose -f docker-compose.yml -f compose-debug.yml up +``` + +- `docker-compose.override.yml.sample` is intended to override the default configuration provided in `docker-compose.yml`. This is useful when, for example, you wish to add port mappings or want to disable a container. + +- To use it, just copy the sample file to `docker-compose.override.yml` and customise it to your liking. Please create this file ONLY when you want to tweak something. This is because the default override file is empty and docker errors if you provide an empty compose file. + +```shell +cp docker-compose.override.yml.sample docker-compose.override.yml + +# Tweak docker-compose.override.yml and then run docker compose up +docker compose up +``` + +- You can also run all these compose files together. 
This is desirable when you want to use both the features. For example, you may want to have some debugging containers AND also want to override some defaults. To achieve this, you can run: + +```shell +docker compose -f docker-compose.yml -f docker-compose.override.yml -f compose-debug.yml up +``` diff --git a/versioned_docs/version-v1.2.0/adv/advanced/quickstart-combine.md b/versioned_docs/version-v1.2.0/adv/advanced/quickstart-combine.md new file mode 100644 index 0000000000..6229c33af1 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/advanced/quickstart-combine.md @@ -0,0 +1,112 @@ +--- +sidebar_position: 4 +description: Combine distributed validator private key shares to recover the validator private key. +--- + +# Combine DV Private Key Shares + +:::danger +Reconstituting Distributed Validator private key shares into a standard validator private key is a security risk, and can potentially cause your validator to be slashed. + +Only combine private keys as a last resort and do so with extreme caution. +::: + +Combine distributed validator private key shares into an Ethereum validator private key. + +## Pre-requisites + +- Ensure you have the `.charon` directories of at least a threshold of the cluster's node operators. +- Ensure you have [docker](https://docs.docker.com/engine/install/) installed. +- Make sure `docker` is running before executing the commands below. + +## Step 1. Set up the key combination directory tree + +Rename each cluster node operator `.charon` directory in a different way to avoid folder name conflicts. + +We suggest naming them clearly and distinctly, to avoid confusion. 
+ +At the end of this process, you should have a tree like this: + +```shell +$ tree ./cluster + +cluster/ +├── node0 +│   ├── charon-enr-private-key +│   ├── cluster-lock.json +│   ├── deposit-data.json +│   └── validator_keys +│   ├── keystore-0.json +│   ├── keystore-0.txt +│   ├── keystore-1.json +│   └── keystore-1.txt +├── node1 +│   ├── charon-enr-private-key +│   ├── cluster-lock.json +│   ├── deposit-data.json +│   └── validator_keys +│   ├── keystore-0.json +│   ├── keystore-0.txt +│   ├── keystore-1.json +│   └── keystore-1.txt +├── node2 +│   ├── charon-enr-private-key +│   ├── cluster-lock.json +│   ├── deposit-data.json +│   └── validator_keys +│   ├── keystore-0.json +│   ├── keystore-0.txt +│   ├── keystore-1.json +│   └── keystore-1.txt +... +└── nodeN + ├── charon-enr-private-key + ├── cluster-lock.json + ├── deposit-data.json + └── validator_keys + ├── keystore-0.json + ├── keystore-0.txt + ├── keystore-1.json + └── keystore-1.txt +``` + +:::warning +Make sure to never mix the various `.charon` directories with one another. + +Doing so can potentially cause the combination process to fail. +::: + +## Step 2. Combine the key shares + +Run the following command: + +```shell +# Combine a clusters private keys +docker run --rm -v "$(pwd):/opt/charon" obolnetwork/charon:v1.2.0 combine --cluster-dir /opt/charon/cluster --output-dir /opt/charon/combined +``` + +This command will store the combined keys in the `output-dir`, in this case a folder named `combined`. 
+ +```shell +$ tree combined +combined +├── keystore-0.json +├── keystore-0.txt +├── keystore-1.json +└── keystore-1.txt +``` + +We can verify that the directory names are correct by looking at the lock file: + +```shell +$ jq .distributed_validators[].distributed_public_key cluster/node0/cluster-lock.json +"0x822c5310674f4fc4ec595642d0eab73d01c62b588f467da6f98564f292a975a0ac4c3a10f1b3a00ccc166a28093c2dcd" +"0x8929b4c8af2d2eb222d377cac2aa7be950e71d2b247507d19b5fdec838f0fb045ea8910075f191fd468da4be29690106" +``` + +:::info + +The generated private keys are in the standard [EIP-2335](https://github.com/ethereum/ercs/blob/master/ERCS/erc-2335.md) format, and can be imported in any Ethereum validator client that supports it. + +Ensure your distributed validator cluster is completely shut down before starting a replacement validator or you are likely to be slashed. +::: diff --git a/versioned_docs/version-v1.2.0/adv/advanced/quickstart-sdk.mdx b/versioned_docs/version-v1.2.0/adv/advanced/quickstart-sdk.mdx new file mode 100644 index 0000000000..2da4f4e303 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/advanced/quickstart-sdk.mdx @@ -0,0 +1,129 @@ +--- +sidebar_position: 2 +description: Create a DV cluster using the Obol Typescript SDK +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Create a DV Using the SDK + +This is a walkthrough of using the [Obol-SDK](https://www.npmjs.com/package/@obolnetwork/obol-sdk) to propose a four-node distributed validator cluster for creation using the [DV Launchpad](../../learn/intro/launchpad.md). + +## Pre-requisites + +- You have [node.js](https://nodejs.org/en) installed. + +## Install the package + +Install the Obol-SDK package into your development environment + + + +
+      npm install --save @obolnetwork/obol-sdk
+    
+
+ +
+      yarn add @obolnetwork/obol-sdk
+    
+
+
+ +## Instantiate the client + +The first thing you need to do is create an instance of the Obol SDK client. The client takes two constructor parameters: + +- The `chainID` for the chain you intend to use. +- An ethers.js [signer](https://docs.ethers.org/v6/api/providers/#Signer-signTypedData) object. + +```ts +import { Client } from "@obolnetwork/obol-sdk"; +import { ethers } from "ethers"; + +// Create a dummy ethers signer object with a throwaway private key +const mnemonic = ethers.Wallet.createRandom().mnemonic?.phrase || ""; +const privateKey = ethers.Wallet.fromPhrase(mnemonic).privateKey; +const wallet = new ethers.Wallet(privateKey); +const signer = wallet.connect(null); + +// Instantiate the Obol Client for holesky +const obol = new Client({ chainId: 17000 }, signer); +``` + +## Propose the cluster + +List the Ethereum addresses of participating operators, along with withdrawal and fee recipient address data for each validator you intend for the operators to create. + +```ts +// A config hash is a deterministic hash of the proposed DV cluster configuration +const configHash = await obol.createClusterDefinition({ + name: "SDK Demo Cluster", + operators: [ + { address: "0xC35CfCd67b9C27345a54EDEcC1033F2284148c81" }, + { address: "0x33807D6F1DCe44b9C599fFE03640762A6F08C496" }, + { address: "0xc6e76F72Ea672FAe05C357157CfC37720F0aF26f" }, + { address: "0x86B8145c98e5BD25BA722645b15eD65f024a87EC" }, + ], + validators: [ + { + fee_recipient_address: "0x3CD4958e76C317abcEA19faDd076348808424F99", + withdrawal_address: "0xE0C5ceA4D3869F156717C66E188Ae81C80914a6e", + }, + ], +}); + +console.log( + `Direct the operators to https://holesky.launchpad.obol.org/dv?configHash=${configHash} to complete the key generation process` +); +``` + +## Invite the Operators to complete the DKG + +Once the Obol-API returns a `configHash` string from the `createClusterDefinition` method, you can use this identifier to invite the operators to the 
[Launchpad](../../learn/intro/launchpad.md) to complete the process + +1. Operators navigate to `https://.launchpad.obol.org/dv?configHash=` and complete the [run a DV with others](../../run/start/quickstart_group.mdx) flow. +1. Once the DKG is complete, and operators are using the `--publish` flag, the created cluster details will be posted to the Obol API. +1. The creator will be able to retrieve this data with `obol.getClusterLock(configHash)`, to use for activating the newly created validator. + +## Retrieve the created Distributed Validators using the SDK + +Once the DKG is complete, the proposer of the cluster can retrieve key data such as the validator public keys and their associated deposit data messages. + +```js +const clusterLock = await obol.getClusterLock(configHash); +``` + +Reference lock files can be found [here](https://github.com/ObolNetwork/charon/tree/main/cluster/testdata). + +## Activate the DVs using the deposit contract + +In order to activate the distributed validators, the cluster operator can retrieve the validators' associated deposit data from the lock file and use it to craft transactions to the `deposit()` method on the deposit contract. 
+ +```js +const validatorDepositData = + clusterLock.distributed_validators[validatorIndex].deposit_data; + +const depositContract = new ethers.Contract( + DEPOSIT_CONTRACT_ADDRESS, // 0x00000000219ab540356cBB839Cbe05303d7705Fa for Mainnet, 0xff50ed3d0ec03aC01D4C79aAd74928BFF48a7b2b for Goerli + depositContractABI, // https://etherscan.io/address/0x00000000219ab540356cBB839Cbe05303d7705Fa#code for Mainnet, and replace the address for Goerli + signer +); + +const TX_VALUE = ethers.parseEther("32"); + +const tx = await depositContract.deposit( + validatorDepositData.pubkey, + validatorDepositData.withdrawal_credentials, + validatorDepositData.signature, + validatorDepositData.deposit_data_root, + { value: TX_VALUE } +); + +const txResult = await tx.wait(); +``` + +## Usage Examples + +Examples of how our SDK can be used are found [here](https://github.com/ObolNetwork/obol-sdk-examples). diff --git a/versioned_docs/version-v1.2.0/adv/advanced/quickstart-split.md b/versioned_docs/version-v1.2.0/adv/advanced/quickstart-split.md new file mode 100644 index 0000000000..69718f2231 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/advanced/quickstart-split.md @@ -0,0 +1,104 @@ +--- +sidebar_position: 3 +description: Migrate an existing validator by splitting its private key into shares +--- + +# Migrate an Existing Validator + +:::warning +This process should only be used if you want to split an *existing validator private key* into multiple private key shares for use in a Distributed Validator Cluster. **If your existing validator is not properly shut down before the Distributed Validator starts, your validator may be slashed**. + +If you are starting a new validator, you should follow a [quickstart guide](../../run/start/quickstart_overview.md) instead. +::: + +Split an existing Ethereum validator key into multiple key shares for use in an [Obol Distributed Validator Cluster](../../learn/intro/key-concepts.md#distributed-validator-cluster). 
+ +## Pre-requisites + +- Ensure you have the existing validator keystores (the ones to split) and passwords. +- Ensure you have [docker](https://docs.docker.com/engine/install/) installed. +- Make sure `docker` is running before executing the commands below. +- If you use MEV-Boost, you must either: + - Turn off your MEV-Boost client before you split your keys, or; + - Temporarily use a relay you won't be using when running the Distributed Validator; to prevent registering for MEV with a timestamp more recent than the one Charon prepares at the moment of key splitting. + +## Step 1. Prepare the existing keystore files + +Create a folder to hold the encrypted keystores, along with the passwords to decrypt them. + +```shell + # Create a folder + mkdir split_keys +``` + +Copy the existing validator `keystore.json` files into this new folder. Alongside them, with a matching filename but ending with `.txt` should be the password to the keystore (e.g.: `keystore-0.json`, `keystore-0.txt`). The files must start with `keystore*`. + +At the end of this process, you should have a tree like this: + +```shell +├── split_keys +│   ├── keystore-0.json +│   ├── keystore-0.txt +│   ├── keystore-1.json +│   ├── keystore-1.txt +│ ... +│   ├── keystore-N.json +│   ├── keystore-N.txt +``` + +## Step 2. Split the keys using the charon docker command + +Run the following docker command to split the keys (for mainnet): + +```shell +CHARON_VERSION= # E.g. v1.2.0 +CLUSTER_NAME= # The name of the cluster you want to create. +WITHDRAWAL_ADDRESS= # The address you want to use for withdrawals (this is just for accuracy in your lock file, you can't change a withdrawal address for a validator that has already been deposited) +FEE_RECIPIENT_ADDRESS= # The address you want to use for block reward and MEV payments. +NODES= # The number of nodes in the cluster. 
+ +docker run --rm -v $(pwd):/opt/charon obolnetwork/charon:${CHARON_VERSION} create cluster \ + --name="${CLUSTER_NAME}" \ + --withdrawal-addresses="${WITHDRAWAL_ADDRESS}" \ + --fee-recipient-addresses="${FEE_RECIPIENT_ADDRESS}" \ + --split-existing-keys \ + --split-keys-dir=/opt/charon/split_keys \ + --nodes ${NODES} \ + --network mainnet +``` + +The above command will create `validator_keys` along with `cluster-lock.json` in `./cluster` for each node. + +Command output: + +```shell +***************** WARNING: Splitting keys ********************** + Please make sure any existing validator has been shut down for + at least 2 finalised epochs before starting the Charon cluster, + otherwise slashing could occur. +**************************************************************** + +Created Charon cluster: + --split-existing-keys=true + +./cluster/ +├─ node[0-*]/ # Directory for each node +│ ├─ charon-enr-private-key # Charon networking private key for node authentication +│ ├─ cluster-lock.json # Cluster lock defines the cluster lock file which is signed by all nodes +│ ├─ validator_keys # Validator keystores and password +│ │ ├─ keystore-*.json # Validator private share key for duty signing +│ │ ├─ keystore-*.txt # Keystore password files for keystore-*.json +``` + +These split keys can now be used to start a Charon cluster. + +## Step 3. (Optional) Encrypt artifacts for distribution + +Within each folder are the encrypted [private key shares](../../learn/intro/key-concepts.md#distributed-validator-key-share), along with the decryption passwords. To transmit these folders to the operators/machines where they will run, it might be prudent to encrypt the folder as a `.zip` to transport them. + +```shell +# For each folder in ./cluster/ encrypt it with a different password +zip -er node1.zip ./cluster/node1/ + +# Repeat for node2,...,nodeN. 
+``` \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/adv/advanced/self-relay.md b/versioned_docs/version-v1.2.0/adv/advanced/self-relay.md new file mode 100644 index 0000000000..05000327ca --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/advanced/self-relay.md @@ -0,0 +1,38 @@ +--- +sidebar_position: 5 +description: Self-host a relay +--- + +# Self-Host a Relay + +If you are experiencing connectivity issues with the Obol hosted relays, or you want to improve your clusters latency and decentralization, you can opt to host your own relay on a separate open and static internet port. + +```shell +# Figure out your public IP +curl v4.ident.me + +# Clone the repo and cd into it. +git clone https://github.com/ObolNetwork/charon-distributed-validator-node.git + +cd charon-distributed-validator-node + +# Replace 'replace.with.public.ip.or.hostname' in relay/docker-compose.yml with your public IPv4 or DNS hostname + +nano relay/docker-compose.yml + +docker compose -f relay/docker-compose.yml up +``` + +Test whether the relay is publicly accessible. This should return an ENR: +`curl http://replace.with.public.ip.or.hostname:3640/enr` + +Ensure the ENR returned by the relay contains the correct public IP and port by decoding it with [ENR viewer](https://enr-viewer.com/). + +Configure **ALL** charon nodes in your cluster to use this relay: + +- Either by adding a flag: `--p2p-relays=http://replace.with.public.ip.or.hostname:3640/enr` +- Or by setting the environment variable: `CHARON_P2P_RELAYS=http://replace.with.public.ip.or.hostname:3640/enr` + +Note that a local `relay/.charon/charon-enr-private-key` file will be created next to `relay/docker-compose.yml` to ensure a persisted relay ENR across restarts. + +A list of publicly available relays that can be used is maintained [here](../../adv/security/risks.md). 
\ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/adv/security/_category_.json b/versioned_docs/version-v1.2.0/adv/security/_category_.json new file mode 100644 index 0000000000..f445b9f96c --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/security/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Security", + "position": 8, + "collapsed": true +} diff --git a/versioned_docs/version-v1.2.0/adv/security/bug-bounty.md b/versioned_docs/version-v1.2.0/adv/security/bug-bounty.md new file mode 100644 index 0000000000..accdc2d72e --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/security/bug-bounty.md @@ -0,0 +1,180 @@ +--- +sidebar_position: 3 +description: Bug Bounty Policy +--- + +# Obol Bug Bounty Program + +## Overview + +At Obol Labs, we prioritize the security of our distributed validator software and related services. Our Bug Bounty Program is designed to encourage and reward security researchers for identifying and reporting potential vulnerabilities. This initiative supports our commitment to the security and integrity of our products. + +## Participant Eligibility + +Participants must meet the following criteria to be eligible for the Bug Bounty Program: + +- Not reside in countries where participation in such programs is prohibited. +- Be at least 14 years of age and possess the legal capacity to participate. +- Have received consent from your employer, if applicable. +- Not have been employed or contracted by Obol Labs, nor be an immediate family member of an employee, within the last 12 months. 
+ +## Scope of the Program + +Eligible submissions must involve software and services developed by Obol, specifically under the domains of: + +- Charon the DV Middleware Client +- Obol DV Launchpad and Public API +- Obol Splits Contracts +- Obol Labs hosted Public Relay Infrastructure + +Submissions related to the following are considered out of scope: + +- Social engineering +- Rate Limiting (Non-critical issues) +- Physical security breaches +- Non-security related UX/UI issues +- Third-party application vulnerabilities +- The [Obol](https://obol.org) static website or the Obol infrastructure +- The operational security of node operators running or using Obol software + +## Program Rules + +- Submitted bugs must not have been previously disclosed publicly. +- Only first reports of vulnerabilities will be considered for rewards; previously reported or known vulnerabilities are ineligible. +- The severity of the vulnerability, as assessed by our team, will determine the reward amount. See the "Rewards" section for details. +- Submissions must include a reproducible proof of concept. +- The Obol security team reserves the right to determine the eligibility and reward for each submission. +- Program terms may be updated at Obol's discretion. +- Valid bugs may be disclosed to partner protocols within the Obol ecosystem to enhance overall security. + +## Rewards Structure + +Rewards are issued based on the severity and impact of the disclosed vulnerability, determined at the discretion of Obol Labs. + +### Critical Vulnerabilities: Up to $100,000 + +A Critical-level vulnerability is one that has a severe impact on the security of the in-production system from an unauthenticated external attacker, and requires immediate attention to fix. Highly likely to have a material impact on validator private key security, and/or loss of funds. 
+ +- High impact, high likelihood + +Impacts: + +- Attacker that is not a member of the cluster can successfully exfiltrate BLS (not K1) private key material from a threshold number of operators in the cluster. +- Attacker that is not a member of the cluster can achieve the production of arbitrary BLS signatures from a threshold number of operators in the cluster. +- Attacker can craft a malicious cluster invite capable of subverting even careful review of all data to steal funds during a deposit. +- Direct theft of any user funds, whether at-rest or in-motion, other than unclaimed yield +- Direct loss of funds +- Permanent freezing of funds (fix requires hard fork) +- Network not being able to confirm new transactions (Total network shutdown) +- Protocol insolvency + +### High Vulnerabilities: Up to $10,000 + +For significant security risks that impact the system from a position of low-trust and require a significant effort to fix. + +- High impact, medium likelihood +- Medium impact, high likelihood + +Impacts: + +- Attacker that is not a member of the cluster can successfully partition the cluster and keep the cluster offline indefinitely. +- Attacker that is not a member of the cluster can exfiltrate Charon ENR private keys. +- Attacker that is not a member of the cluster can destroy funds but cannot steal them. 
+- Unintended chain split (Network partition) +- Temporary freezing of network transactions by delaying one block by 500% or more of the average block time of the preceding 24 hours beyond standard difficulty adjustments +- RPC API crash affecting projects with greater than or equal to 25% of the market capitalization on top of the respective layer +- Theft of unclaimed yield +- Theft of unclaimed royalties +- Permanent freezing of unclaimed yield +- Permanent freezing of unclaimed royalties +- Temporary freezing of funds +- Retrieve sensitive data/files from a running server: + - blockchain keys + - database passwords + - (this does not include non-sensitive environment variables, open source code, or usernames) +- Taking state-modifying authenticated actions (with or without blockchain state interaction) on behalf of other users without any interaction by that user, such as: + - Changing cluster information + - Withdrawals + - Making trades + +### Medium Vulnerabilities: Up to $2,500 + +For vulnerabilities with a moderate impact, affecting system availability or integrity. + +- High impact, low likelihood +- Medium impact, medium likelihood +- Low impact, high likelihood + +Impacts: + +- Attacker that is a member of a cluster can exfiltrate K1 key material from another member. +- Attacker that is a member of the cluster can denial of service attack enough peers in the cluster to prevent operation of the validator(s) +- Attacker that is a member of the cluster can bias the protocol in a manner to control the majority of block proposal opportunities. +- Attacker can get a DV Launchpad user to inadvertently interact with a smart contract that is not a part of normal operation of the launchpad. 
+- Increasing network processing node resource consumption by at least 30% without brute force actions, compared to the preceding 24 hours +- Shutdown of greater than or equal to 30% of network processing nodes without brute force actions, but does not shut down the network +- Charon cluster identity private key theft +- Rogue node operator to penetrate and compromise other nodes to disturb the cluster’s lifecycle +- Charon public relay node is compromised and leads to cluster topologies getting discovered and disrupted +- Smart contract unable to operate due to lack of token funds +- Block stuffing +- Griefing (e.g. no profit motive for an attacker, but damage to the users or the protocol) +- Theft of gas +- Unbounded gas consumption +- Redirecting users to malicious websites (Open Redirect) + +### Low Vulnerabilities: Up to $500 + +For vulnerabilities with minimal impact, unlikely to significantly affect system operations. + +- Low impact, medium likelihood +- Medium impact, low likelihood + +Impacts: + +- Attacker can sometimes put a Charon node in a state that causes it to drop one out of every one hundred attestations made by a validator +- Attacker can display bad data on a non-interactive part of the launchpad. +- Contract fails to deliver promised returns, but doesn't lose value +- Shutdown of greater than 10% or equal to but less than 30% of network processing nodes without brute force actions, but does not shut down the network +- Changing details of other users (including modifying browser local storage) without already-connected wallet interaction and with significant user interaction such as: + - Iframing leading to modifying the backend/browser state (must demonstrate impact with PoC) +- Taking over broken or expired outgoing links such as: + - Social media handles, etc. +- Temporarily disabling user to access target site, such as: + - Locking up the victim from login + - Cookie bombing, etc. 
+ +Rewards may be issued as cash, merchandise, or other forms of recognition, at Obol's discretion. Only one reward will be granted per unique vulnerability. + +## The following activities are prohibited by this bug bounty program + +- Any testing on mainnet or public testnet deployed code; all testing should be done on local-forks of either public testnet or mainnet +- Any testing with pricing oracles or third-party smart contracts +- Attempting phishing or other social engineering attacks against our employees and/or customers +- Any testing with third-party systems and applications (e.g. browser extensions) as well as websites (e.g. SSO providers, advertising networks) +- Any denial of service attacks that are executed against project assets +- Automated testing of services that generate significant amounts of traffic +- Public disclosure of an unpatched vulnerability in an embargoed bounty + +## Submission process + +To report a vulnerability, please contact us at security@obol.tech with: + +- A detailed description of the vulnerability and its potential impact. +- Steps to reproduce the issue. +- Any relevant proof of concept code, screenshots, or documentation. +- Your contact information. + +Incomplete reports may not be eligible for rewards. + +## Disclosure and Confidentiality + +Obol Labs will disclose vulnerabilities and the identity of the researcher (with consent) after remediation. Researchers are required to maintain confidentiality until official disclosure by Obol Labs. + +## Legal and Ethical Compliance + +Participants must adhere to all relevant laws and regulations. Obol Labs will not pursue legal action against researchers reporting vulnerabilities in good faith, but reserves the right to respond to violations of this policy. + +## Non-Disclosure Agreement (NDA) + +Participants may be required to sign an NDA for access to certain proprietary information during their research. 
diff --git a/versioned_docs/version-v1.2.0/adv/security/contact.md b/versioned_docs/version-v1.2.0/adv/security/contact.md new file mode 100644 index 0000000000..29a07ec23f --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/security/contact.md @@ -0,0 +1,10 @@ +--- +sidebar_position: 7 +description: Security details for the Obol Network +--- + +# Contacts + +Please email security@obol.tech to report a security incident, vulnerability, bug or inquire about Obol's security. + +Also, visit the [obol security repo](https://github.com/ObolNetwork/obol-security) for more details. diff --git a/versioned_docs/version-v1.2.0/adv/security/ev-assessment.md b/versioned_docs/version-v1.2.0/adv/security/ev-assessment.md new file mode 100644 index 0000000000..dee3538934 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/security/ev-assessment.md @@ -0,0 +1,295 @@ +--- +sidebar_position: 5 +description: Software Development Security Assessment +--- + +# Software Development at Obol + +When hardening a project's technical security, team members' operational security, and the security of the software development practices in use by the team are some of the most critical areas to secure. Many hacks and compromises in the space to date have been a result of these attack vectors rather than exploits of the software itself. + +With this in mind, in January 2023 the Obol team retained the expertise of Ethereal Ventures' security researcher Alex Wade to interview key stakeholders and produce a report into the team's Software Development Lifecycle. + +The below page is a result of the report that was produced. What is present here has had some sensitive information redacted, and contains responses to the recommendations made, detailing the actions the Obol team have taken to mitigate what has been highlighted.
+ +# Obol Report + +**Prepared by: Alex Wade (Ethereal Ventures)** +**Date: Jan 2023** + +Over the past month, I worked with Obol to review their software development practices in preparation for their upcoming security audits. My goals were to review and analyze: + +- Software development processes +- Vulnerability disclosure and escalation procedures +- Key personnel risk + +The information in this report was collected through a series of interviews with Obol’s project leads. + +## Contents + +- Background Info +- Analysis - Cluster Setup and DKG + - Key Risks + - Potential Attack Scenarios +- Recommendations + - R1: Users should deploy cluster contracts through a known on-chain entry point + - R2: Users should deposit to the beacon chain through a pool contract + - R3: Raise the barrier to entry to push an update to the Launchpad +- Additional Notes + - Vulnerability Disclosure + - Key Personnel Risk + +## Background Info + +**Each team lead was asked to describe Obol in terms of its goals, objectives, and key features.** + +### What is Obol? + +Obol builds DVT (Distributed Validator Technology) for Ethereum. + +### What is Obol’s goal? + +Obol’s goal is to solve a classic distributed systems problem: uptime. + +Rather than requiring Ethereum validators to stake on their own, Obol allows groups of operators to stake together. Using Obol, a single validator can be run cooperatively by multiple people across multiple machines. + +In theory, this architecture provides validators with some redundancy against common issues: server and power outages, client failures, and more. + +### What are Obol’s objectives? + +Obol’s business objective is to provide base-layer infrastructure to support a distributed validator ecosystem. As Obol provides base layer technology, other companies and projects will build on top of Obol. + +Obol’s business model is to eventually capture a portion of the revenue generated by validators that use Obol infrastructure. 
+ +### What is Obol’s product? + +Obol’s product consists of three main components, each run by its own team: a webapp, a client, and smart contracts. + +- [DV Launchpad](../../learn/intro/launchpad.md): A webapp to create and manage distributed validators. +- [Charon](../../learn/charon/intro.md): A middleware client that enables operators to run distributed validators. +- [Solidity](../../learn/intro/obol-splits.mdx): Withdrawal and fee recipient contracts for use with distributed validators. + +## Analysis - Cluster Setup and DKG + +The Launchpad guides users through the process of creating a cluster, which defines important parameters like the validator’s fee recipient and withdrawal addresses, as well as the identities of the operators in the cluster. In order to ensure their cluster configuration is correct, users need to rely on a few different factors. + +**First, users need to trust the Charon client** to perform the DKG correctly, and validate things like: + +- Config file is well-formed and is using the expected version +- Signatures and ENRs from other operators are valid +- Cluster config hash is correct +- DKG succeeds in producing valid signatures +- Deposit data is well-formed and is correctly generated from the cluster config and DKG. + +However, Charon’s validation is limited to the digital: signature checks, cluster file syntax, etc. It does NOT help would-be operators determine whether the other operators listed in their cluster definition are the real people with whom they intend to start a DVT cluster. So - + +**Second, users need to come to social consensus with fellow operators.** While the cluster is being set up, it’s important that each operator is an active participant. 
Each member of the group must validate and confirm that: + +- the cluster file correctly reflects their address and node identity, and reflects the information they received from fellow operators +- the cluster parameters are expected – namely, the number of validators and signing threshold + +**Finally, users need to perform independent validation.** Each user should perform their own validation of the cluster definition: + +- Is my information correct? (address and ENR) +- Does the information I received from the group match the cluster definition? +- Is the ETH2 deposit data correct, and does it match the information in the cluster definition? +- Are the withdrawal and fee recipient addresses correct? + +These final steps are potentially the most difficult, and may require significant technical knowledge. + +## Key Risks + +### 1. Validation of Contract Deployment and Deposit Data Relies Heavily on Launchpad + +From my interviews, it seems that the user deploys both the withdrawal and fee recipient contracts through the Launchpad. + +What I’m picturing is that during the first parts of the cluster setup process, the user is prompted to sign one or more transactions deploying the withdrawal and fee recipient contracts to mainnet. The Launchpad apparently uses an npm package to deploy these contracts: `0xsplits/splits-sdk`, which I assume provides either JSON artifacts or a factory address on chain. The Launchpad then places the deployed contracts into the cluster config file, and the process moves on. + +If an attacker has published a malicious update to the Launchpad (or compromised an underlying dependency), the contracts deployed by the Launchpad may be malicious. The questions I’d like to pose are: + +- How does the group creator know the Launchpad deployed the correct contracts? +- How does the rest of the group know the creator deployed the contracts through the Launchpad? 
+ +My understanding is that this ultimately comes down to the independent verification that each of the group’s members performs during and after the cluster’s setup phase. + +At its worst, this verification might consist solely of the cluster creator confirming to the others that, yes, those addresses match the contracts I deployed through the Launchpad. + +A more sophisticated user might verify that not only do the addresses match, but the deployed source code looks roughly correct. However, this step is far out of the realm of many would-be validators. To be really certain that the source code is correct would require auditor-level knowledge. + +The risk is that: + +- the deployed contracts are NOT the correctly-configured 0xsplits waterfall/fee splitter contracts +- most users are ill-equipped to make this determination themselves +- we don’t want to trust the Launchpad as the single source of truth + +In the worst case, the cluster may end up depositing with malicious withdrawal or fee recipient credentials. If unnoticed, this may net an attacker the entire withdrawal amount, once the cluster exits. + +Note that the same (or similar) risks apply to validation of deposit data, which has the potential to be similarly difficult. I’m a little fuzzy on which part of the Obol stack actually generates the deposit data / deposit transaction, so I can’t speak to this as much. However, I think the mitigation for both of these is roughly the same - read on! + +**Mitigation:** + +It’s certainly a good idea to make it harder to deploy malicious updates to the Launchpad, but this may not be entirely possible. A higher-yield strategy may be to educate and empower users to perform independent validation of the DVT setup process - without relying on information fed to them by Charon and the Launchpad. + +I’ve outlined some ideas for this in #R1 and #R2. + +### 2. Social Consensus, aka “Who sends the 32 ETH?” + +Depositing to the beacon chain requires a total of 32 ETH. 
Obol’s product allows multiple operators to act as a single validator together, which means would-be operators need to agree on how to fund the 32 ETH needed to initiate the deposit. + +It is my understanding that currently, this process comes down to trust and loose social consensus. Essentially, the group needs to decide who chips in what amount together, and then trust someone to take the 32 ETH and complete the deposit process correctly (without running away with the money). + +Granted, the initial launch of Obol will be open only to a small group of people as the kinks in the system get worked out - but in preparation for an eventual public release, the deposit process needs to be much simpler and far less reliant on trust. + +Mitigation: See #R2. + +#### Potential Attack Scenarios + +During the interview process, I learned that each of Obol’s core components has its own GitHub repo, and that each repo has roughly the same structure in terms of organization and security policies. For each repository: + +- There are two overall github organization administrators, and a number of people have administrative control over individual repositories. +- In order to merge PRs, the submitter needs: + - CI/CD checks to pass + - Review from one person (anyone at Obol) + +Of course, admin access also means the ability to change these settings - so repo admins could theoretically merge PRs without needing checks to pass, and without review/approval, organization admins can control the full GitHub organization. + +The following scenarios describe the impact an attack may have. + +**1. Publishing a malicious version of the Launchpad, or compromising an underlying dependency** + +- Reward: High +- Difficulty: Medium-Low + +As described in Key Risks, publishing a malicious version of the Launchpad has the potential to net the largest payout for an attacker. 
By tampering with the cluster’s deposit data or withdrawal/fee recipient contracts, an attacker stands to gain 32 ETH or more per compromised cluster. + +During the interviews, I learned that merging PRs to main in the Launchpad repo triggers an action that publishes to the site. Given that merges can be performed by an authorized Obol developer, this makes the developers prime targets for social engineering attacks. + +Additionally, the use of the `0xsplits/splits-sdk` NPM package to aid in contract deployment may represent a supply chain attack vector. It may be that this applies to other Launchpad dependencies as well. + +In any case, with a fairly large surface area and high potential reward, this scenario represents a credible risk to users during the cluster setup and DKG process. + +See #R1, #R2, and #R3 for some ideas to address this scenario. + +**2. Publishing a malicious version of Charon to new operators** + +- Reward: Medium +- Difficulty: High + +During the cluster setup process, Charon is responsible both for validating the cluster configuration produced by the Launchpad, as well as performing a DKG ceremony between a group’s operators. + +If new operators use a malicious version of Charon to perform this process, it may be possible to tamper with both of these responsibilities, or even get access to part or all of the underlying validator private key created during DKG. + +However, the difficulty of this type of attack seems quite high. An attacker would first need to carry out the same type of social engineering attack described in scenario 1 to publish and tag a new version of Charon. Crucially, users would also need to install the malicious version - unlike the Launchpad, an update here is not pushed directly to users. 
+ +As long as Obol is clear and consistent with communication around releases and versioning, it seems unlikely that a user would both install a brand-new, unannounced release, and finish the cluster setup process before being warned about the attack. + +**3. Publishing a malicious version of Charon to existing validators** + +- Reward: Low +- Difficulty: High + +Once a distributed validator is up and running, much of the danger has passed. As a middleware client, Charon sits between a validator’s consensus and validator clients. As such, it shouldn’t have direct access to a validator’s withdrawal keys nor signing keys. + +If existing validators update to a malicious version of Charon, it’s likely the worst thing an attacker could theoretically do is slash the validator, however, assuming Charon has no access to any private keys, this would be predicated on one or more validator clients connected to Charon also failing to prevent the signing of a slashable message. In practice, a compromised Charon client is more likely to pose liveness risks than safety risks. + +This is not likely to be particularly motivating to potential attackers - and paired with the high difficulty described above, this scenario seems unlikely to cause significant issues. + +## Recommendations + +### R1: Users should deploy cluster contracts through a known on-chain entry point + +During setup, users should only sign one transaction via the Launchpad - to a contract located at an Obol-held ENS (e.g. `launchpad.obol.eth`). This contract should deploy everything needed for the cluster to operate, like the withdrawal and fee recipient contracts. It should also initialize them with the provided reward split configuration (and any other config needed). 
+ +Rather than using an NPM library to supply a factory address or JSON artifacts, this has the benefit of being both: + +- **Harder to compromise:** as long as the user knows launchpad.obol.eth, it’s pretty difficult to trick them into deploying the wrong contracts. +- **Easier to validate** for non-technical users: the Obol contract can be queried for deployment information via etherscan. For example: + +![Etherscan Contract View Screenshot](/img/EtherscanContractView.png) + +Note that in order for this to be successful, Obol needs to provide detailed steps for users to perform manual validation of their cluster setups. Users should be able to treat this as a “checklist:” + +- Did I send a transaction to `launchpad.obol.eth`? +- Can I use the ENS name to locate and query the deployment manager contract on etherscan? +- If I input my address, does etherscan report the configuration I was expecting? + - withdrawal address matches + - fee recipient address matches + - reward split configuration matches + +As long as these steps are plastered all over the place (i.e. not just on the Launchpad) and Obol puts in effort to educate users about the process, this approach should allow users to validate cluster configurations themselves - regardless of Launchpad or NPM package compromise. + +#### Obol’s response + +Roadmapped: add the ability for the OWR factory to claim and transfer its reverse resolution ownership. + +### R2: Users should deposit to the beacon chain through a pool contract + +Once cluster setup and DKG is complete, a group of operators should deposit to the beacon chain by way of a pool contract. 
The pool contract should: + +- Accept Eth from any of the group’s operators +- Stop accepting Eth when the contract’s balance hits (32 ETH * number of validators) +- Make it easy to pull the trigger and deposit to the beacon chain once the critical balance has been reached +- Offer all of the group’s operators a “bail” option at any point before the deposit is triggered + +Ideally, this contract is deployed during the setup process described in #R1, as another step toward allowing users to perform independent validation of the process. + +Rather than relying on social consensus, this should: + +- Allow operators to fund the validator without needing to trust any single party +- Make it harder to mess up the deposit or send funds to some malicious actor, as the pool contract should know what the beacon deposit contract address is + +#### Obol’s response + +Roadmapped: give the operators a streamlined, secure way to deposit Ether (ETH) to the beacon chain collectively, satisfying specific conditions: + +- Pooling from multiple operators. +- Ceasing to accept ETH once a critical balance is reached, defined by 32 ETH multiplied by the number of validators. +- Facilitating an immediate deposit to the beacon chain once the target balance is reached. +- Provide a 'bail-out' option for operators to withdraw their contribution before initiating the group's deposit to the beacon chain. + +### R3: Raise the barrier to entry to push an update to the Launchpad + +Currently, any repo admin can publish an update to the Launchpad unchecked. + +Given the risks and scenarios outlined above, consider amending this process so that the sole compromise of either admin is not sufficient to publish to the Launchpad site. It may be worthwhile to require both admins to approve publishing to the site. 
+ +Along with simply adding additional prerequisites to publish an update to the Launchpad, ensure that both admins have enabled some level of multi-factor authentication on their GitHub accounts. + +#### Obol’s response + +We removed individuals’ ability to merge changes without review, enforced MFA, signed commits, and employed Bulldozer bot to make sure a PR gets merged automatically when all checks pass. + +## Additional Notes + +### Vulnerability Disclosure + +During the interviews, I got some conflicting information when asking about Obol’s vulnerability disclosure process. + +Some interviewees directed me towards Obol’s security repo, which details security contacts: [ObolNetwork/obol-security](https://github.com/ObolNetwork/obol-security), while some answered that disclosure should happen primarily through Immunefi. While these may both be part of the correct answer, it seems that Obol’s disclosure process may not be as well-defined as it could be. Here are some notes: + +- I wasn’t able to find information about Obol on Immunefi. I also didn’t find any reference to a security contact or disclosure policy in Obol’s docs. +- When looking into the obol security repo, I noticed broken links in a few of the sections in README.md and SECURITY.md: + - Security policy + - More Information +- Some of the text and links in the Bug Bounty Program don’t seem to apply to Obol (see text referring to Vaults and Strategies). +- The Receiving Disclosures section does not include a public key with which submitters can encrypt vulnerability information. + +It’s my understanding that these items are probably lower priority due to Obol’s initial closed launch - but these should be squared away soon! +[Obol response to latest vuln disclosure process goes here] + +#### Obol’s response + +We addressed all of the concerns in the obol-security repository: + + 1. The security policy link has been fixed + 2.
The Bug Bounty program received an overhaul and clearly states rewards, eligibility, and scope + 3. We list two GPG public keys for which we accept encrypted vulnerability reports. + +We are actively working towards integrating Immunefi in our security pipeline. + +### Key Personnel Risk + +A final section on the specifics of key personnel risk faced by Obol has been redacted from the original report. Particular areas of control highlighted were github org ownership and domain name control. + +#### Obol’s response + +These risks have been mitigated by adding an extra admin to the github org, and by setting up a second DNS stack in case the primary one fails, along with general Opsec improvements. diff --git a/versioned_docs/version-v1.2.0/adv/security/overview.md b/versioned_docs/version-v1.2.0/adv/security/overview.md new file mode 100644 index 0000000000..30ff11dc90 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/security/overview.md @@ -0,0 +1,40 @@ +--- +sidebar_position: 1 +description: Security Overview +--- + +# Overview + +This page serves as an overview of the Obol Network from a security point of view. + +This page is updated quarterly. The last update was on 2024-June-19. + +## Table of Contents + +- [Overview](#overview) + - [Table of Contents](#table-of-contents) + - [List of Security Audits and Assessments](#list-of-security-audits-and-assessments) + - [Security focused documents](#security-focused-documents) + - [Bug Bounty](#bug-bounty) + +## List of Security Audits and Assessments + +The completed audit reports are linked [here](https://github.com/ObolNetwork/obol-security/tree/main/audits). + +- A review of Obol Labs [development processes](./ev-assessment) by [Ethereal Ventures](https://www.etherealventures.com/).
+ +- A [security assessment](https://github.com/ObolNetwork/obol-security/blob/f9d7b0ad0bb8897f74ccb34cd4bd83012ad1d2b5/audits/Sigma_Prime_Obol_Network_Charon_Security_Assessment_Report_v2_1.pdf) of Charon by [Sigma Prime](https://sigmaprime.io/) resulting in version [`v0.16.0`](https://github.com/ObolNetwork/charon/releases/tag/v0.16.0). + +- A second [assessment of Charon](https://obol.tech/charon_quantstamp_assessment.pdf) by [QuantStamp](https://quantstamp.com/) resulting in version [`v0.19.1`](https://github.com/ObolNetwork/charon/releases/tag/v0.19.1). + +- A [solidity audit](./smart_contract_audit) of the Obol Splits contracts by [Zach Obront](https://zachobront.com/). + +- A [penetration testing certificate](https://github.com/ObolNetwork/obol-security/blob/main/audits/Sayfer_2024-03_Penetration_Testing_CFD.pdf) of the Obol DV Launchpad by [Sayfer](https://sayfer.io/). + +## Security focused documents + +- A [threat model](./threat_model) for a DV middleware client like Charon. + +## Bug Bounty + +Information related to disclosing bugs and vulnerabilities to Obol can be found on [the next page](./bug-bounty.md). diff --git a/versioned_docs/version-v1.2.0/adv/security/risks.md b/versioned_docs/version-v1.2.0/adv/security/risks.md new file mode 100644 index 0000000000..3bec3e9d11 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/security/risks.md @@ -0,0 +1,45 @@ +--- +sidebar_position: 2 +description: Centralization Risks and mitigation +--- + +# Centralization Risks and Mitigation + +## Risk: Obol hosting the relay infrastructure + +**Mitigation**: Self-host a relay. + +One of the risks associated with Obol hosting the [LibP2P relays](../../learn/charon/networking.mdx) infrastructure allowing peer discovery is that if Obol-hosted relays go down, peers won't be able to discover each other and perform the DKG. To mitigate this risk, external organizations and node operators can consider self-hosting a relay. 
This way, if Obol's relays go down, the clusters can still operate through other relays in the network. Ensure that all nodes in the cluster use the same relays, or they will not be able to find each other if they are connected to different relays. + +The following non-Obol entities run relays that you can consider adding to your cluster (you can have more than one per cluster, see the `--p2p-relays` flag of [`charon run`](../../learn/charon/charon-cli-reference.md#the-run-command)): + + +| Entity | Relay URL | +|-----------|---------------------------------------| +| [DSRV](https://www.dsrvlabs.com/) | https://charon-relay.dsrvlabs.dev | +| [Infstones](https://infstones.com/) | https://obol-relay.infstones.com/ | +| [Hashquark](https://www.hashquark.io/) | https://relay-2.prod-relay.721.land/ | +| [Figment](https://figment.io/) | https://relay-1.obol.figment.io/ | +| [Node Guardians](https://nodeguardians.io/) | https://obol-relay.nodeguardians.io/ | + +## Risk: Obol being able to update Charon code + +**Mitigation**: Pin specific docker versions or compile from source on a trusted commit. + +Another risk associated with Obol is the Labs team having the ability to update the [Charon code](https://github.com/ObolNetwork/charon) used by node operators within DV clusters, which could introduce vulnerabilities or malicious code. To mitigate this risk, operators can consider pinning specific versions of the Docker image or git repo that have been [thoroughly tested](../security/overview.md#list-of-security-audits-and-assessments) and accepted by the network. This would ensure that any updates are carefully vetted and reviewed by the community, and only introduced into a running cluster gradually. The labs team will strive to communicate the security or operational impact any Charon update entails, giving operators the chance to decide whether they want potential performance or quality of experience improvements, or whether they remain on a trusted version for longer. 
+ +## Risk: Obol hosting the DV Launchpad + +**Mitigation**: Use [`create cluster`](../../learn/charon/charon-cli-reference.md#the-create-command) or [`create dkg`](../../learn/charon/charon-cli-reference.md#creating-the-configuration-for-a-dkg-ceremony) locally and distribute the files manually. + +Hosting the first Charon frontend, the [DV Launchpad](../../learn/intro/launchpad.md), on a centralized server could create a single point of failure, as users would have to rely on Obol's server to access the protocol. This could limit the decentralization of the protocol and could make it vulnerable to attacks or downtime. Obol hosting the launchpad on a decentralized network, such as IPFS is a first step but not enough. This is why the Charon code is open-source and contains a CLI interface to interact with the protocol locally. + +To mitigate the risk of launchpad failure, consider using the `create cluster` or `create dkg` commands locally and distributing the key shares files manually. + +## Risk: Obol going bust/rogue + +**Mitigation**: Use key recovery. + +The final centralization risk associated with Obol is the possibility of the company going bankrupt or acting maliciously, which would lead to a loss of control over the network and potentially cause damage to the ecosystem. To mitigate this risk, Obol has implemented a key recovery mechanism. This would allow the clusters to continue operating and to retrieve full private keys even if Obol is no longer able to provide support. + +A guide to recombine key shares into a single private key can be accessed [here](../advanced/quickstart-combine.md). 
diff --git a/versioned_docs/version-v1.2.0/adv/security/smart_contract_audit.mdx b/versioned_docs/version-v1.2.0/adv/security/smart_contract_audit.mdx new file mode 100644 index 0000000000..bfb14b3427 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/security/smart_contract_audit.mdx @@ -0,0 +1,495 @@ +--- +sidebar_position: 4 +description: Smart Contract Audit +--- + +# Smart Contract Audit + + + + + + + +
+

Obol Audit Report

+

Obol Manager Contracts

+

Prepared by: Zach Obront, Independent Security Researcher

+

Date: Sept 18 to 22, 2023

+

PDF Version

+
+ +## About **Obol** + +The Obol Network is an ecosystem for trust minimized staking that enables people to create, test, run & co-ordinate distributed validators. + +The Obol Manager contracts are responsible for distributing validator rewards and withdrawals among the validator and node operators involved in a distributed validator. + +## About **zachobront** + +Zach Obront is an independent smart contract security researcher. He serves as a Lead Senior Watson at Sherlock, a Security Researcher at Spearbit, and has identified multiple critical severity bugs in the wild, including in a Top 5 Protocol on Immunefi. You can say hi on Twitter at [@zachobront](http://twitter.com/zachobront). + +## Summary & Scope + +The [ObolNetwork/obol-manager-contracts](https://github.com/ObolNetwork/obol-manager-contracts/) repository was audited at commit [50ce277919723c80b96f6353fa8d1f8facda6e0e](https://github.com/ObolNetwork/obol-manager-contracts/tree/50ce277919723c80b96f6353fa8d1f8facda6e0e). + +The following contracts were in scope: + +- src/controllers/ImmutableSplitController.sol +- src/controllers/ImmutableSplitControllerFactory.sol +- src/lido/LidoSplit.sol +- src/lido/LidoSplitFactory.sol +- src/owr/OptimisticWithdrawalReceiver.sol +- src/owr/OptimisticWithdrawalReceiverFactory.sol + +After completion of the fixes, the [2f4f059bfd145f5f05d794948c918d65d222c3a9](https://github.com/ObolNetwork/obol-manager-contracts/tree/2f4f059bfd145f5f05d794948c918d65d222c3a9) commit was reviewed. After this review, the updated Lido fee share system in [PR #96](https://github.com/ObolNetwork/obol-manager-contracts/pull/96/files) (at commit [fd244a05f964617707b0a40ebb11b523bbd683b8](https://github.com/ObolNetwork/obol-splits/pull/96/commits/fd244a05f964617707b0a40ebb11b523bbd683b8)) was reviewed. 
+ +## Summary of Findings + +| Identifier | Title | Severity | Fixed | +| :------: | ---------------------------- | :-------------: | :-----: | +| [M-01](#m-01-future-fees-may-be-skirted-by-setting-a-non-eth-reward-token) | Future fees may be skirted by setting a non-ETH reward token | Medium | ✓ | +| [M-02](#m-02-splits-with-256-or-more-node-operators-will-not-be-able-to-switch-on-fees) | Splits with 256 or more node operators will not be able to switch on fees | Medium | ✓ | +| [M-03](#m-03-in-a-mass-slashing-event-node-operators-are-incentivized-to-get-slashed) | In a mass slashing event, node operators are incentivized to get slashed | Medium | | +| [L-01](#l-01-obol-fees-will-be-applied-retroactively-to-all-non-distributed-funds-in-the-splitter) | Obol fees will be applied retroactively to all non-distributed funds in the Splitter | Low | ✓ | +| [L-02](#l-02-if-owr-is-used-with-rebase-tokens-and-theres-a-negative-rebase-principal-can-be-lost) | If OWR is used with rebase tokens and there's a negative rebase, principal can be lost | Low | ✓ | +| [L-03](#l-03-lidosplit-can-receive-eth-which-will-be-locked-in-contract) | LidoSplit can receive ETH, which will be locked in contract | Low | ✓ | +| [L-04](#l-04-upgrade-to-latest-version-of-solady-to-fix-libclone-bug) | Upgrade to latest version of Solady to fix LibClone bug | Low | ✓ | +| [G-01](#g-01-steth-and-wsteth-addresses-can-be-saved-on-implementation-to-save-gas) | stETH and wstETH addresses can be saved on implementation to save gas | Gas | ✓ | +| [G-02](#g-02-owr-can-be-simplified-and-save-gas-by-not-tracking-distributedfunds) | OWR can be simplified and save gas by not tracking distributedFunds | Gas | ✓ | +| [I-01](#i-01-strong-trust-assumptions-between-validators-and-node-operators) | Strong trust assumptions between validators and node operators | Informational | | +| [I-02](#i-02-provide-node-operator-checklist-to-validate-setup) | Provide node operator checklist to validate setup | Informational | | + 
+## Detailed Findings + +### [M-01] Future fees may be skirted by setting a non-ETH reward token + +Fees are planned to be implemented on the `rewardRecipient` splitter by updating to a new fee structure using the `ImmutableSplitController`. + +It is assumed that all rewards will flow through the splitter, because (a) all distributed rewards less than 16 ETH are sent to the `rewardRecipient`, and (b) even if a team waited for rewards to be greater than 16 ETH, rewards sent to the `principalRecipient` are capped at the `amountOfPrincipalStake`. + +This creates a fairly strong guarantee that reward funds will flow to the `rewardRecipient`. Even if a user were to set their `amountOfPrincipalStake` high enough that the `principalRecipient` could receive unlimited funds, the Obol team could call `distributeFunds()` when the balance got near 16 ETH to ensure fees were paid. + +However, if the user selects a non-ETH token, all ETH will be withdrawable only through the `recoverFunds()` function. If they set up a split with their node operators as their `recoveryAddress`, all funds will be withdrawable via `recoverFunds()` without ever touching the `rewardRecipient` or paying a fee. + +#### Recommendation + +I would recommend removing the ability to use a non-ETH token from the `OptimisticWithdrawalRecipient`. Alternatively, if it feels like it may be a use case that is needed, it may make sense to always include ETH as a valid token, in addition to any `OWRToken` set. + +#### Review + +Fixed in [PR 85](https://github.com/ObolNetwork/obol-manager-contracts/pull/85) by removing the ability to use non-ETH tokens. + +### [M-02] Splits with 256 or more node operators will not be able to switch on fees + +0xSplits is used to distribute rewards across node operators. All Splits are deployed with an ImmutableSplitController, which is given permissions to update the split one time to add a fee for Obol at a future date. 
+ +The Factory deploys these controllers as Clones with Immutable Args, hard coding the `owner`, `accounts`, `percentAllocations`, and `distributorFee` for the future update. This data is packed as follows: + +```solidity + function _packSplitControllerData( + address owner, + address[] calldata accounts, + uint32[] calldata percentAllocations, + uint32 distributorFee + ) internal view returns (bytes memory data) { + uint256 recipientsSize = accounts.length; + uint256[] memory recipients = new uint[](recipientsSize); + + uint256 i = 0; + for (; i < recipientsSize;) { + recipients[i] = (uint256(percentAllocations[i]) << ADDRESS_BITS) | uint256(uint160(accounts[i])); + + unchecked { + i++; + } + } + + data = abi.encodePacked(splitMain, distributorFee, owner, uint8(recipientsSize), recipients); + } +``` + +In the process, `recipientsSize` is unsafely downcasted into a `uint8`, which has a maximum value of `256`. As a result, any values greater than 256 will overflow and result in a lower value of `recipients.length % 256` being passed as `recipientsSize`. + +When the Controller is deployed, the full list of `percentAllocations` is passed to the `validSplit` check, which will pass as expected. However, later, when `updateSplit()` is called, the `getNewSplitConfiguation()` function will only return the first `recipientsSize` accounts, ignoring the rest. 
+ +```solidity + function getNewSplitConfiguration() + public + pure + returns (address[] memory accounts, uint32[] memory percentAllocations) + { + // fetch the size first + // then parse the data gradually + uint256 size = _recipientsSize(); + accounts = new address[](size); + percentAllocations = new uint32[](size); + + uint256 i = 0; + for (; i < size;) { + uint256 recipient = _getRecipient(i); + accounts[i] = address(uint160(recipient)); + percentAllocations[i] = uint32(recipient >> ADDRESS_BITS); + unchecked { + i++; + } + } + } +``` + +When `updateSplit()` is eventually called on `splitsMain` to turn on fees, the `validSplit()` check on that contract will revert because the sum of the percent allocations will no longer sum to `1e6`, and the update will not be possible. + +#### Proof of Concept + +The following test can be dropped into a file in `src/test` to demonstrate that passing 400 accounts will result in a `recipientSize` of `400 - 256 = 144`: + +```solidity +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { Test } from "forge-std/Test.sol"; +import { console } from "forge-std/console.sol"; +import { ImmutableSplitControllerFactory } from "src/controllers/ImmutableSplitControllerFactory.sol"; +import { ImmutableSplitController } from "src/controllers/ImmutableSplitController.sol"; + +interface ISplitsMain { + function createSplit(address[] calldata accounts, uint32[] calldata percentAllocations, uint32 distributorFee, address controller) external returns (address); +} + +contract ZachTest is Test { + function testZach_RecipientSizeCappedAt256Accounts() public { + vm.createSelectFork("https://mainnet.infura.io/v3/fb419f740b7e401bad5bec77d0d285a5"); + + ImmutableSplitControllerFactory factory = new ImmutableSplitControllerFactory(address(9999)); + bytes32 deploymentSalt = keccak256(abi.encodePacked(uint256(1102))); + address owner = address(this); + + address[] memory bigAccounts = new address[](400); + uint32[] memory 
bigPercentAllocations = new uint32[](400); + + for (uint i = 0; i < 400; i++) { + bigAccounts[i] = address(uint160(i)); + bigPercentAllocations[i] = 2500; + } + + // confirmation that 0xSplits will allow creating a split with this many accounts + // dummy acct passed as controller, but doesn't matter for these purposes + address split = ISplitsMain(0x2ed6c4B5dA6378c7897AC67Ba9e43102Feb694EE).createSplit(bigAccounts, bigPercentAllocations, 0, address(8888)); + + ImmutableSplitController controller = factory.createController(split, owner, bigAccounts, bigPercentAllocations, 0, deploymentSalt); + + // added a public function to controller to read recipient size directly + uint savedRecipientSize = controller.ZachTest__recipientSize(); + assert(savedRecipientSize < 400); + console.log(savedRecipientSize); // 144 + } +} +``` + +#### Recommendation + +When packing the data in `_packSplitControllerData()`, check `recipientsSize` before downcasting to a uint8: + +```diff +function _packSplitControllerData( + address owner, + address[] calldata accounts, + uint32[] calldata percentAllocations, + uint32 distributorFee +) internal view returns (bytes memory data) { + uint256 recipientsSize = accounts.length; ++ if (recipientsSize > 256) revert InvalidSplit__TooManyAccounts(recipientSize); + ... +} +``` + +#### Review + +Fixed as recommended in [PR 86](https://github.com/ObolNetwork/obol-manager-contracts/pull/86). + +### [M-03] In a mass slashing event, node operators are incentivized to get slashed + +When the `OptimisticWithdrawalRecipient` receives funds from the beacon chain, it uses the following rule to determine the allocation: + +> If the amount of funds to be distributed is greater than or equal to 16 ether, it is assumed that it is a withdrawal (to be returned to the principal, with a cap on principal withdrawals of the total amount they deposited). + +> Otherwise, it is assumed that the funds are rewards. 
+ +This value being as low as 16 ether protects against any predictable attack the node operator could perform. For example, due to the effect of hysteresis in updating effective balances, it does not seem to be possible for node operators to predictably bleed a withdrawal down to be below 16 ether (even if they timed a slashing perfectly). + +However, in the event of a mass slashing event, slashing punishments can be much more severe than they otherwise would be. To calculate the size of a slash, we: + +- take the total percentage of validator stake slashed in the 18 days preceding and following a user's slash +- multiply this percentage by 3 (capped at 100%) +- the full slashing penalty for a given validator equals 1/32 of their stake, plus the resulting percentage above applied to the remaining 31/32 of their stake + +In order for such penalties to bring the withdrawal balance below 16 ether (assuming a full 32 ether to start), we would need the percentage taken to be greater than `15 / 31 = 48.3%`, which implies that `48.3 / 3 = 16.1%` of validators would need to be slashed. + +Because the measurement is taken from the 18 days before and after the incident, node operators would have the opportunity to see a mass slashing event unfold, and later decide that they would like to be slashed along with it. + +In the event that they observed that greater than 16.1% of validators were slashed, Obol node operators would be able to get themselves slashed, be exited with a withdrawal of less than 16 ether, and claim that withdrawal as rewards, effectively stealing from the principal recipient. + +#### Recommendations + +Find a solution that provides a higher level of guarantee that the funds withdrawn are actually rewards, and not a withdrawal. + +#### Review + +Acknowledged. We believe this is a black swan event. It would require a major ETH client to be compromised, and would be a betrayal of trust, so likely not EV+ for doxxed operators. 
Users of this contract with unknown operators should be wary of such a risk. + +### [L-01] Obol fees will be applied retroactively to all non-distributed funds in the Splitter + +When Obol decides to turn on fees, a call will be made to `ImmutableSplitController::updateSplit()`, which will take the predefined split parameters (the original user specified split with Obol's fees added in) and call `updateSplit()` to implement the change. + +```solidity +function updateSplit() external payable { + if (msg.sender != owner()) revert Unauthorized(); + + (address[] memory accounts, uint32[] memory percentAllocations) = getNewSplitConfiguration(); + + ISplitMain(splitMain()).updateSplit(split, accounts, percentAllocations, uint32(distributorFee())); +} +``` + +If we look at the code on `SplitsMain`, we can see that this `updateSplit()` function is applied retroactively to all funds that are already in the split, because it updates the parameters without performing a distribution first: + +```solidity +function updateSplit( + address split, + address[] calldata accounts, + uint32[] calldata percentAllocations, + uint32 distributorFee +) + external + override + onlySplitController(split) + validSplit(accounts, percentAllocations, distributorFee) +{ + _updateSplit(split, accounts, percentAllocations, distributorFee); +} +``` + +This means that any funds that have been sent to the split but have not yet been distributed will be subject to the Obol fee. Since these splitters will be accumulating all execution layer fees, it is possible that some of them may have received large MEV bribes, where this after-the-fact fee could be quite expensive. + +#### Recommendation + +The most strict solution would be for the `ImmutableSplitController` to store both the old split parameters and the new parameters. The old parameters could first be used to call `distributeETH()` on the split, and then `updateSplit()` could be called with the new parameters. 
+ +If storing both sets of values seems too complex, the alternative would be to require that `split.balance <= 1` to update the split. Then the Obol team could simply store the old parameters off chain to call `distributeETH()` on each split to "unlock" it to update the fees. + +(Note that for the second solution, the ETH balance should be less than or equal to 1, not 0, because 0xSplits stores empty balances as `1` for gas savings.) + +#### Review + +Fixed as recommended in [PR 86](https://github.com/ObolNetwork/obol-manager-contracts/pull/86). + +### [L-02] If OWR is used with rebase tokens and there's a negative rebase, principal can be lost + +The `OptimisticWithdrawalRecipient` is deployed with a specific token immutably set on the clone. It is presumed that that token will usually be ETH, but it can also be an ERC20 to account for future integrations with tokenized versions of ETH. + +In the event that one of these integrations used a rebasing version of ETH (like `stETH`), the architecture would need to be set up as follows: + +`OptimisticWithdrawalRecipient => rewards to something like LidoSplit.sol => Split Wallet` + +In this case, the OWR would need to be able to handle rebasing tokens. + +In the event that rebasing tokens are used, there is the risk that slashing or inactivity leads to a period with a negative rebase. 
In this case, the following chain of events could happen: + +- `distribute(PULL)` is called, setting `fundsPendingWithdrawal == balance` +- rebasing causes the balance to decrease slightly +- `distribute(PULL)` is called again, so when `fundsToBeDistributed = balance - fundsPendingWithdrawal` is calculated in an unchecked block, it ends up being near `type(uint256).max` +- since this is more than `16 ether`, the first `amountOfPrincipalStake - _claimedPrincipalFunds` will be allocated to the principal recipient, and the rest to the reward recipient +- we check that `endingDistributedFunds <= type(uint128).max`, but unfortunately this check misses the issue, because only `fundsToBeDistributed` underflows, not `endingDistributedFunds` +- `_claimedPrincipalFunds` is set to `amountOfPrincipalStake`, so all future claims will go to the reward recipient +- the `pullBalances` for both recipients will be set higher than the balance of the contract, and so will be unusable + +In this situation, the only way for the principal to get their funds back would be for the full `amountOfPrincipalStake` to hit the contract at once, and for them to call `withdraw()` before anyone called `distribute(PUSH)`. If anyone was to be able to call `distribute(PUSH)` before them, all principal would be sent to the reward recipient instead. + +#### Recommendation + +Similar to #74, I would recommend removing the ability for the `OptimisticWithdrawalRecipient` to accept non-ETH tokens. + +Otherwise, I would recommend two changes for redundant safety: + +1) Do not allow the OWR to be used with rebasing tokens. + +2) Move the `_fundsToBeDistributed = _endingDistributedFunds - _startingDistributedFunds;` out of the unchecked block. The case where `_endingDistributedFunds` underflows is already handled by a later check, so this one change should be sufficient to prevent any risk of this issue. 
+ +#### Review + +Fixed in [PR 85](https://github.com/ObolNetwork/obol-manager-contracts/pull/85) by removing the ability to use non-ETH tokens. + +### [L-03] LidoSplit can receive ETH, which will be locked in contract + +Each new `LidoSplit` is deployed as a clone, which comes with a `receive()` function for receiving ETH. + +However, the only function on `LidoSplit` is `distribute()`, which converts `stETH` to `wstETH` and transfers it to the `splitWallet`. + +While this contract should only be used for Lido to pay out rewards (which will come in `stETH`), it seems possible that users may accidentally use the same contract to receive other validator rewards (in ETH), or that Lido governance may introduce ETH payments in the future, which would cause the funds to be locked. + +#### Proof of Concept + +The following test can be dropped into `LidoSplit.t.sol` to confirm that the clones can currently receive ETH: + +```solidity +function testZach_CanReceiveEth() public { + uint before = address(lidoSplit).balance; + payable(address(lidoSplit)).transfer(1 ether); + assertEq(address(lidoSplit).balance, before + 1 ether); +} +``` + +#### Recommendation + +Introduce an additional function to `LidoSplit.sol` which wraps ETH into stETH before calling `distribute()`, in order to rescue any ETH accidentally sent to the contract. + +#### Review + +Fixed in [PR 87](https://github.com/ObolNetwork/obol-manager-contracts/pull/87/files) by adding a `rescueFunds()` function that can send ETH or any ERC20 (except `stETH` or `wstETH`) to the `splitWallet`. + +### [L-04] Upgrade to latest version of Solady to fix LibClone bug + +In the recent [Solady audit](https://github.com/Vectorized/solady/blob/main/audits/cantina-solady-report.pdf), an issue was found that affects LibClone. + +In short, LibClone assumes that the length of the immutable arguments on the clone will fit in 2 bytes. 
If it's larger, it overlaps other op codes and can lead to strange behaviors, including causing the deployment to fail or causing the deployment to succeed with no resulting bytecode. + +Because the `ImmutableSplitControllerFactory` allows the user to input arrays of any length that will be encoded as immutable arguments on the Clone, we can manipulate the length to accomplish these goals. + +Fortunately, failed deployments or empty bytecode (which causes a revert when `init()` is called) are not problems in this case, as the transactions will fail, and it can only happen with unrealistically long arrays that would only be used by malicious users. + +However, it is difficult to be sure how else this risk might be exploited by using the overflow to jump to later op codes, and it is recommended to update to a newer version of Solady where the issue has been resolved. + +#### Proof of Concept + +If we comment out the `init()` call in the `createController()` call, we can see that the following test "successfully" deploys the controller, but the result is that there is no bytecode: + +```solidity +function testZach__CreateControllerSoladyBug() public { + ImmutableSplitControllerFactory factory = new ImmutableSplitControllerFactory(address(9999)); + bytes32 deploymentSalt = keccak256(abi.encodePacked(uint256(1102))); + address owner = address(this); + + address[] memory bigAccounts = new address[](28672); + uint32[] memory bigPercentAllocations = new uint32[](28672); + + for (uint i = 0; i < 28672; i++) { + bigAccounts[i] = address(uint160(i)); + if (i < 32) bigPercentAllocations[i] = 820; + else bigPercentAllocations[i] = 34; + } + + ImmutableSplitController controller = factory.createController(address(8888), owner, bigAccounts, bigPercentAllocations, 0, deploymentSalt); + assert(address(controller) != address(0)); + assert(address(controller).code.length == 0); +} +``` + +#### Recommendation + +Delete Solady and clone it from the most recent commit, or any commit 
after the fixes from [PR #548](https://github.com/Vectorized/solady/pull/548/files#diff-27a3ba4730de4b778ecba4697ab7dfb9b4f30f9e3666d1e5665b194fe6c9ae45) were merged. + +#### Review + +Solady has been updated to v.0.0.123 in [PR 88](https://github.com/ObolNetwork/obol-manager-contracts/pull/88). + +### [G-01] stETH and wstETH addresses can be saved on implementation to save gas + +The `LidoSplitFactory` contract holds two immutable values for the addresses of the `stETH` and `wstETH` tokens. + +When new clones are deployed, these values are encoded as immutable args. This adds the values to the contract code of the clone, so that each time a call is made, they are passed as calldata along to the implementation, which reads the values from the calldata for use. + +Since these values will be consistent across all clones on the same chain, it would be more gas efficient to store them in the implementation directly, which can be done with `immutable` storage values, set in the constructor. + +This would save 40 bytes of calldata on each call to the clone, which leads to a savings of approximately 640 gas on each call. + +#### Recommendation + +1) Add the following to `LidoSplit.sol`: + +```solidity +address immutable public stETH; +address immutable public wstETH; +``` + +2) Add a constructor to `LidoSplit.sol` which sets these immutable values. Solidity treats immutable values as constants and stores them directly in the contract bytecode, so they will be accessible from the clones. + +3) Remove `stETH` and `wstETH` from `LidoSplitFactory.sol`, both as storage values, arguments to the constructor, and arguments to `clone()`. + +4) Adjust the `distribute()` function in `LidoSplit.sol` to read the storage values for these two addresses, and remove the helper functions to read the clone's immutable arguments for these two values. + +#### Review + +Fixed as recommended in [PR 87](https://github.com/ObolNetwork/obol-manager-contracts/pull/87). 
+ +### [G-02] OWR can be simplified and save gas by not tracking distributedFunds + +Currently, the `OptimisticWithdrawalRecipient` contract tracks four variables: + +- distributedFunds: total amount of the token distributed via push or pull +- fundsPendingWithdrawal: total balance distributed via pull that haven't been claimed yet +- claimedPrincipalFunds: total amount of funds claimed by the principal recipient +- pullBalances: individual pull balances that haven't been claimed yet + +When `_distributeFunds()` is called, we perform the following math (simplified to only include relevant updates): + +```solidity +endingDistributedFunds = distributedFunds - fundsPendingWithdrawal + currentBalance; +fundsToBeDistributed = endingDistributedFunds - distributedFunds; +distributedFunds = endingDistributedFunds; +``` + +As we can see, `distributedFunds` is added to the `endingDistributedFunds` variable and then removed when calculating `fundsToBeDistributed`, having no impact on the resulting `fundsToBeDistributed` value. + +The `distributedFunds` variable is not read or used anywhere else on the contract. + +#### Recommendation + +We can simplify the math and save substantial gas (a storage write plus additional operations) by not tracking this value at all. + +This would allow us to calculate `fundsToBeDistributed` directly, as follows: + +```solidity +fundsToBeDistributed = currentBalance - fundsPendingWithdrawal; +``` + +#### Review + +Fixed as recommended in [PR 85](https://github.com/ObolNetwork/obol-manager-contracts/pull/85). + +### [I-01] Strong trust assumptions between validators and node operators + +It is assumed that validators and node operators will always act in the best interest of the group, rather than in their selfish best interest. + +It is important to make clear to users that there are strong trust assumptions between the various parties involved in the DVT. 
+ +Here are a select few examples of attacks that a malicious set of node operators could perform: + +1) Since there is currently no mechanism for withdrawals besides the consensus of the node operators, a minority of them sufficient to withhold consensus could blackmail the principal for a payment of up to 16 ether in order to allow them to withdraw. Otherwise, they could turn off their node operators and force the principal to bleed down to a final withdrawn balance of just over 16 ether. + +2) Node operators are all able to propose blocks within the P2P network, which are then propagated out to the rest of the network. Node software is accustomed to signing for blocks built by block builders based on the metadata including quantity of fees and the address they'll be sent to. This is enforced by social consensus, with block builders not wanting to harm validators in order to have their blocks accepted in the future. However, node operators in a DVT are not concerned with the social consensus of the network, and could therefore build blocks that include large MEV payments to their personal address (instead of the DVT's 0xSplit), add fictitious metadata to the block header, have their fellow node operators accept the block, and take the MEV for themselves. + +3) While the withdrawal address is immutably set on the beacon chain to the OWR, the fee address is added by the nodes to each block. Any majority of node operators sufficient to reach consensus could create a new 0xSplit with only themselves on it, and use that for all execution layer fees. The principal (and other node operators) would not be able to stop them or withdraw their principal, and would be stuck with staked funds paying fees to the malicious node operators. + +Note that there are likely many other possible attacks that malicious node operators could perform. 
This report is intended to demonstrate some examples of the trust level that is needed between validators and node operators, and to emphasize the importance of making these assumptions clear to users. + +#### Review + +Acknowledged. We believe EIP 7002 will reduce this trust assumption as it would enable the validator exit via the execution layer withdrawal key. + +### [I-02] Provide node operator checklist to validate setup + +There are a number of ways that the user setting up the DVT could plant backdoors to harm the other users involved in the DVT. + +Each of these risks is possible to check before signing off on the setup, but some are rather hidden, so it would be useful for the protocol to provide a list of checks that node operators should do before signing off on the setup parameters (or, even better, provide these checks for them through the front end). + +1) Confirm that `SplitsMain.getHash(split)` matches the hash of the parameters that the user is expecting to be used. + +2) Confirm that the controller clone delegates to the correct implementation. If not, it could be pointed to delegate to `SplitMain` and then called to `transferControl()` to a user's own address, allowing them to update the split arbitrarily. + +3) `OptimisticWithdrawalRecipient.getTranches()` should be called to check that `amountOfPrincipalStake` is equal to the amount that they will actually be providing. + +4) The controller's `owner` and future split including Obol fees should be provided to the user. They should be able to check that `ImmutableSplitControllerFactory.predictSplitControllerAddress()`, with those parameters inputted, results in the controller that is actually listed on `SplitsMain.getController(split)`. + +#### Review + +Acknowledged. We do some of these already (will add the remainder) automatically in the launchpad UI during the cluster confirmation phase by the node operator. We will also add it in markdown to the repo. 
diff --git a/versioned_docs/version-v1.2.0/adv/security/threat_model.md b/versioned_docs/version-v1.2.0/adv/security/threat_model.md new file mode 100644 index 0000000000..633d0c8bd4 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/security/threat_model.md @@ -0,0 +1,155 @@ +--- +sidebar_position: 5 +description: Threat model for a Distributed Validator +--- + +# Charon Threat Model + +This page outlines a threat model for Charon, in the context of it being a Distributed Validator middleware for Ethereum validator clients. + +## Actors + +- Node owner (NO) +- Cluster node operators (CNO) +- Rogue node operator (RNO) +- Outside attacker (OA) + +## General observations + +This page describes some considerations the Obol core team made about the security of a distributed validator in the context of its deployment and interaction with outside actors. + +The goal of this threat model is to provide transparency, but it is by no means a comprehensive audit or complete security reference. It’s a sharing of the experiences and thoughts we gained during the last few years building distributed validator technologies. + +While to the Beacon Chain, a distributed validator is seen in much the same way as a regular validator, and thus retains some of the same security considerations, Charon’s threat model is different from a validator client’s threat model because of its general design. + +While a validator client owns and operates on a set of validator private keys, the design of Charon allows its node operators to rarely (if ever) see the complete validator private keys, relying instead on modern cryptography to generate partial private key shares. 
+ +An Ethereum distributed validator employs advanced signature primitives such that no operator ever handles the full validator private key in any standard lifecycle step: the [BLS digital signature scheme](https://en.wikipedia.org/wiki/BLS_digital_signature) employed by the Ethereum network allows distributed validators to individually sign a blob of data and then aggregate the resulting signatures in a transparent manner, never requiring any of the participating parties to know the full private key to do so. + +If the subset of the available Charon nodes is lower than a given threshold, the cluster is not able to continue with its duties. + +Given the collaborative nature of a Distributed Validator cluster, every operator must prioritize the liveliness and well-being of the cluster. Charon, at the moment of writing this page cannot reward and penalize operators within a cluster independently. + +This implies that Charon’s threat model can’t quite be equated to that of a single validator client, since they work on a different - albeit similar - set of security concepts. + +## Identity private key + +A distributed validator cluster is made up of a number of nodes, often run by a number of independent operators. For each DV cluster there’s a set of Ethereum validator private keys on which they want to validate on behalf of. + +Alongside those, each node (henceforth ‘operator’) holds an SECP256K1 identity private key, referred to as an ENR, that identifies their node to the other cluster operators’ nodes. + +Exfiltration of said private key could lead to possible impersonation from an outside attacker, possibly leading to intra-cluster peering issues, eclipse attack risks, and degraded validator performance. 
+ +Charon client communication is handled via BFT consensus, which is able to tolerate a given number of misbehaving nodes up to a certain threshold: once this threshold is reached, the cluster is not able to continue with its lifecycle and loses liveness guarantees (the validator goes offline). If more than two-thirds of nodes in a cluster are malicious, a cluster also loses safety guarantees (enough bad actors could collude to come to consensus on something slashable). + +Identity private key theft and the subsequent execution of a rogue cluster node is equivalent in the context of BFT consensus to a misbehaving node, hence the cluster can survive and continue with its duties up to what’s specified by the cluster’s BFT protocol’s parameters. + +The likelihood of this happening is low: an OA with enough knowledge of the topology of the operator’s network must steal `fault tolerance of the cluster + 1` identity private keys and run Charon nodes to subvert the distributed validator BFT consensus to push the validator offline. + +## Ethereum validator private key access + +A distributed validator cluster executes Ethereum validator duties by acting as a middleman between the beacon chain and a validator client. + +To do so, the cluster must have knowledge of the Ethereum validator’s private key. + +The design and implementation of Charon minimizes the chances of this by splitting the Ethereum validator private keys into parts, which are then assigned to each node operator. +A [distributed key generation](https://en.wikipedia.org/wiki/Distributed_key_generation) (DKG) process is used in order to evenly and safely create the private key shares without any central party having access to the full private key. + +The cryptography primitives employed in Charon can allow a threshold of the node operator’s private key shares to be reconstructed into the whole validator private key if needed. 
+ +While the facilities to do this are present in the form of CLI commands, as stated before Charon never reconstructs the key in normal operations since BLS digital signature system allows for signature aggregation. + +A distributed validator cluster can be started in two ways: + +1. An existing Ethereum validator private key is split by the private key holder, and distributed in a trusted manner among the operators. +2. The operators participate in a distributed key generation (DKG) process, to create private key shares that collectively can be used to sign validation duties as an Ethereum distributed validator. The full private key for the cluster never exists in one place during or after the DKG. + +In case 1, one of the node operators K has direct access to the Ethereum validator key and is tasked with the generation of other operator’s identity keys and key shards. + +It is clear that in this case the entirety of the sensitive material set is as secure as K’s environment; if K is compromised or malicious, the distributed validator could be slashed. + +Case 2 is different, because there’s no pre-existing Ethereum validator key in a single operator's hands: it will be generated using the FROST DKG algorithm. + +Assuming a successful DKG process, each operator will only ever handle its own key shares instead of the full Ethereum validator private key. + +A set of rogue operators composed of enough members to reconstruct the original Ethereum private keys might pose the risk of slashing for a distributed validator by colluding to produce slashable messages together. + +We deem this scenario’s likelihood as low, as it would mean that node operators decided to willfully slash the stake that they should be being rewarded for staking. + +Still, in the context of an outside attack, purposefully slashing a validator would mean stealing multiple operator key shares, which in turn means violating many cluster operator’s security almost at the same time. 
This scenario may occur if there is a 0-day vulnerability in a piece of software they all run or in case of node misconfiguration. + +## Rogue node operator + +Nodes are connected by means of either relay nodes, or directly to one another. + +Each node operator is at risk of being impeded by other nodes or by the relay operator in the execution of their duties. + +Nodes need to expose a set of TCP ports to be able to work, and the mere fact of doing that opens up the opportunity for rogue parties to execute DDoS attacks. + +Another attack surface for the cluster exists in rogue nodes purposefully filling the various inter-state databases with meaningless data, or more generally submitting bogus information to the other parties to slow down the processing or, in the case of a sybil attack, bring the cluster to a halt. + +The likelihood of this scenario is medium, because there’s no active threat hunting part: there’s no need for the rogue node operator to penetrate and compromise other nodes to disturb the cluster’s lifecycle. + +## Outside attackers interfering with a cluster + +There are two levels of sophistication in an OA: + +1. No knowledge of the topology of the cluster: The attacker doesn’t know where each cluster node is located and so can’t force fault tolerance +1 nodes offline if it can’t find them. +2. Knowledge of the topology of the network (or part of it) is possessed: the OA can operate DDoS attacks or try breaking into node’s servers - at that point, the “rogue node operator” scenario applies. + +The likelihood of this scenario is low: an OA needs extensive capabilities and sufficient incentive to be able to carry out an attack of this size. + +An outside attacker could also find and use vulnerabilities in the underlying cryptosystems and cryptography libraries used by Charon and other Ethereum clients. 
Forging signatures that fool Charon’s cryptographic library or other dependencies may be feasible, but forging signatures or otherwise finding a vulnerability in either the SECP256K1+ECDSA or BLS12-381+BLS cryptosystems we deem to be a low likelihood risk. + +## Malicious beacon nodes + +A malicious beacon node (BN) could prevent the distributed validator from operating its validation duties, and could plausibly increase the likelihood of slashing by serving Charon illegitimate information. + +If the number of nodes configured with the malicious BN is equal to the byzantine threshold for the Charon BFT consensus protocol, the validation process can potentially halt since the BFT parameter threshold is reached; if most of the nodes are byzantine, the system will reach consensus on a set of data that isn’t valid. + +We deem the likelihood of this scenario to be medium depending on the trust model associated with the BNs deployment (cloud, self-hosted, SaaS product): node operators should always host or at least trust their own beacon nodes. + +## Malicious Charon relays + +A Charon relay is used as a communication bridge between nodes that aren’t directly exposed on the Internet. It also acts as the peer discovery mechanism for a cluster. + +Once a peer’s IP address has been discovered via the relay, a direct connection can be attempted. Nodes can either communicate by exchanging data through a relay, or by using the relay as a means to establish a direct TCP connection to one another.
+ +A malicious relay owned by an OA could lead to: + +- Network topology discovery, facilitating the “outside attackers interfering with a cluster” scenario +- Impeding node communication, potentially impacting the BFT consensus protocol liveness (not security) and distributed validator duties +- DKG process disruption leading to frustration and potential abandonment by node operators: could lead to the usage of a standard Ethereum validator setup, which implies weaker security overall + +We note that BFT consensus liveness disruption can only happen if the number of nodes using the malicious relay for communication is equal to the byzantine nodes amount defined in the consensus parameters. + +This risk can be mitigated by configuring nodes with multiple relay URLs from only [trusted entities](../advanced/self-relay.md). + +The likelihood of this scenario is medium: Charon nodes are configured with a default set of relay nodes, so if an OA were to compromise those, it would lead to many cluster topologies getting discovered and potentially attacked and disrupted.
+ +By doing that, the OA can edit the lock file as it sees fit, leading to the “rogue node operator” scenario. An OA or RNO might also manage to social engineer their way into convincing other operators into running their malicious lock file with verification disabled. + +The likelihood of this scenario is low: an OA would need to compromise every node operator through social engineering to both use a different set of files, and to run its cluster with `--no-verify`. + +## Conclusions + +Distributed Validator Technology (DVT) helps maintain a high-assurance environment for Ethereum validators by leveraging modern cryptography to ensure no single point of failure is easily found in the system. + +As with any computing system, security considerations are to be expected in order to keep the environment safe. + +From the point of view of an Ethereum validator entity, running their services with a DV client can help greatly with availability, minimizing slashing risks, and maximizing participation in the network. + +On the other hand, one must take into consideration the risks involved with dishonest cluster operators, as well as rogue third-party beacon nodes or relay providers. + +In the end, we believe the benefits of DVT greatly outweigh the potential threats described in this overview. 
diff --git a/versioned_docs/version-v1.2.0/adv/troubleshooting/_category_.json b/versioned_docs/version-v1.2.0/adv/troubleshooting/_category_.json new file mode 100644 index 0000000000..9b64570467 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/troubleshooting/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Troubleshooting", + "position": 2, + "collapsed": true +} diff --git a/versioned_docs/version-v1.2.0/adv/troubleshooting/client_configurations.md b/versioned_docs/version-v1.2.0/adv/troubleshooting/client_configurations.md new file mode 100644 index 0000000000..b0416404af --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/troubleshooting/client_configurations.md @@ -0,0 +1,82 @@ +--- +sidebar_position: 3 +description: A reference for extra configuration of Ethereum Clients when running in DVs. +--- + +# Client Configuration + +Many execution, consensus, and validator clients need custom flags or parameters to work best with Distributed Validators. These settings are often dispersed across a number of documentation pages or example repos. This page aims to be a reference for each client and the specific additions they may require. + + + + +## Lighthouse + +### Consensus Client + +Nothing specific for distributed validators is required. If you are configuring MEV-boost, consult the settings you need [here](../../run/start/quickstart-builder-api.mdx#consensus-clients). + +### Validator Client + +Required flags: +```shell +--distributed +``` + +## Lodestar + +### Consensus Client + +Nothing specific for distributed validators is required. If you are configuring MEV-boost, consult the settings you need [here](../../run/start/quickstart-builder-api.mdx#consensus-clients). 
+ +### Validator Client + +Required flags: +```shell +--distributed +``` + +## Nimbus + +### Consensus Client + +When running a Nimbus Consensus Client you must add the following flag **to `charon run`**: +```shell +--feature-set-enable=json_requests +``` + +### Validator Client + +Required flags: +```shell +--distributed +``` + +## Prysm + +### Consensus Client + +Nothing specific for distributed validators is required. If you are configuring MEV-boost, consult the settings you need [here](../../run/start/quickstart-builder-api.mdx#consensus-clients). + +### Validator Client + +Required flags: +```shell +--distributed +``` + +## Teku + +### Consensus Client + +Required flags: +```shell +--validators-graffiti-client-append-format=DISABLED +``` + +### Validator Client + +Required flags: +```shell +--Xobol-dvt-integration-enabled +``` \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/adv/troubleshooting/dkg_failure.md b/versioned_docs/version-v1.2.0/adv/troubleshooting/dkg_failure.md new file mode 100644 index 0000000000..9d1e418d12 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/troubleshooting/dkg_failure.md @@ -0,0 +1,86 @@ +--- +sidebar_position: 2 +description: Handling DKG failure +--- + +# Handling DKG Failure + +While the DKG process has been tested and validated against many different configuration instances, it can still encounter issues which might result in failure. + +Our DKG is designed in a way that doesn't allow for inconsistent results: either it finishes correctly for every peer, or it fails. + +This is a **safety** feature: you don't want to deposit an Ethereum distributed validator that not every operator is able to participate in. + +The most common source of issues lies in the network stack: if any of the peers' Internet connection glitches substantially, the DKG will fail. 
If you are attempting to run the `dkg` command in two places at once, or you have a `charon run` command with the same `charon-enr-private-key` as you are trying to DKG with, these may also disrupt a key generation ceremony. + +Charon's DKG doesn't allow peer reconnection once the process is started, but it does allow for re-connections before that. + +When you see the following message: + +```log +14:08:34.505 INFO dkg Waiting to connect to all peers... +``` + +this means your Charon instance is waiting for all the other cluster peers to start their DKG process: at this stage, peers can disconnect and reconnect at will, the DKG process will still continue. + +A log line will confirm the connection of a new peer: + +```log +14:08:34.523 INFO dkg Connected to peer 1 of 3 {"peer": "fantastic-adult"} +14:08:34.529 INFO dkg Connected to peer 2 of 3 {"peer": "crazy-bunch"} +14:08:34.673 INFO dkg Connected to peer 3 of 3 {"peer": "considerate-park"} +``` + +As soon as all the peers are connected, this message will be shown: + +```log +14:08:34.924 INFO dkg All peers connected, starting DKG ceremony +``` + +Past this stage **no disconnections are allowed**, and _all peers must leave their terminals open_ in order for the DKG process to complete: this is a synchronous phase, and every peer is required in order to reach completion. + +If for some reason the DKG process fails, you would see error logs that resemble this: + +```log +14:28:46.691 ERRO cmd Fatal error: sync step: p2p connection failed, please retry DKG: context canceled +``` + +As the error message suggests, the DKG process needs to be retried. + +## Cleaning up the `.charon` directory + +One cannot simply retry the DKG process: Charon refuses to overwrite any runtime file in order to avoid inconsistencies and private key loss. 
+ +When attempting to re-run a DKG with an unclean data directory - which is either `.charon` or what was specified with the `--data-dir` CLI parameter - this is the error that will be shown: + +```log +14:44:13.448 ERRO cmd Fatal error: data directory not clean, cannot continue {"disallowed_entity": "cluster-lock.json", "data-dir": "/compose/node0"} +``` + +The `disallowed_entity` field lists all the files that Charon refuses to overwrite, while `data-dir` is the full path of the runtime directory the DKG process is using. + +In order to retry the DKG process one must delete the following entities, if present: + +- `validator_keys` directory +- `cluster-lock.json` file +- `deposit-data.json` file + +:::warning +The `charon-enr-private-key` file **must be preserved**, failure to do so requires the DKG process to be restarted from the beginning by creating a new cluster definition. +::: + +If you're doing a DKG with a custom cluster definition - for example, created with `charon create dkg`, rather than the Obol Launchpad - you can re-use the same file. + +Once this process has been completed, the cluster operators can retry a DKG. + +## Further debugging + +If you are trying to create an extremely large, geographically diverse cluster, there is a chance the process could be timing out. Consider adding the flags `--timeout=5m --shutdown-delay=60s` to allow more time for the ceremony to complete and safely shut down across all nodes. + +If for some reason the DKG process still fails, node operators are advised to reach out to the Obol team by opening an [issue](https://github.com/ObolNetwork/charon/issues), detailing the troubleshooting steps that were taken and providing **debug logs**. + +To enable debug logs, first clean up the Charon data directory as explained in [the previous section](#cleaning-up-the-charon-directory), then run your DKG command while appending `--log-level=debug` at the end.
+ +In order for the Obol team to debug your issue as quickly and precisely as possible, please provide full logs in text form, not through screenshots or display photos. + +Providing complete debug logs from all peers is particularly important, since it allows the team to reconstruct precisely what happened throughout the ceremony. diff --git a/versioned_docs/version-v1.2.0/adv/troubleshooting/errors.mdx b/versioned_docs/version-v1.2.0/adv/troubleshooting/errors.mdx new file mode 100644 index 0000000000..1f93600b70 --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/troubleshooting/errors.mdx @@ -0,0 +1,694 @@ +--- +sidebar_position: 1 +description: Errors & Resolutions +--- + +# Errors & Resolutions + +All operators should try to restart their nodes and should check if they are on the latest stable version before attempting anything other configuration change as we are still in beta and frequently releasing fixes. You can restart and update with the following commands: + +```shell +docker compose down +git pull +docker compose up +``` + +You can check your logs using + +```shell +docker compose logs +``` + +
+ +

ENRs & Keys

+
+
+ +

What is an ENR?

+
+

+ An ENR is shorthand for an{" "} + Ethereum Node Record. + It is a way to represent a node on a public network, with a reliable + mechanism to update its information. +
+
+ At Obol we use ENRs to identify Charon nodes to one another such that they + can form clusters with the right Charon nodes and not impostors. ENRs have + private keys they use to sign updates to the + data contained + in their ENR. This private key is by default found at + .charon/charon-enr-private-key + , and should be kept secure, and not checked into version control. +
+
+ An ENR looks something like this: +

+        
+          enr:-JG4QAgAOXjGFcTIkXBO30aUMzg2YSo1CYV0OH8Sf2s7zA2kFjVC9ZQ_jZZItdE8gA-tUXW-rWGDqEcoQkeJ98Pw7GaGAYFI7eoegmlkgnY0gmlwhCKNyGGJc2VjcDI1NmsxoQI6SQlzw3WGZ_VxFHLhawQFhCK8Aw7Z0zq8IABksuJEJIN0Y3CCPoODdWRwgj6E
+        
+      
+

+
+
+ +

+ How do I get my ENR if I want to generate it again? +

+
+
    +
  • + cd to the directory where your private keys are located + (ex: cd /path/to/charon/enr/private/key) +
  • +
  • + Run{" "} + + docker run --rm -v "$(pwd):/opt/charon" obolnetwork/charon:v1.2.0 enr + + . This prints the ENR on your screen.{" "} +
  • +
  • + Please note that this ENR is not the same as the one generated when you + created it for the first time. This is because the process of generating + ENRs includes the current timestamp. +
  • +
+
+
+ +

+ {" "} + What do I do if lose my charon-enr-private-key?{" "} +

+
+
    +
  • + {" "} + For now, ENR rotation/replacement is not supported, it will be supported + in a future release.{" "} +
  • +
  • + {" "} + Therefore, it's advised to always keep a backup of your + charon-enr-private-key + in a secure location (ex: cloud storage, USB Flash drive, etc.).{" "} +
  • +
+
+
+ +

I can't find the keys anywhere

+
+
    +
  • + The charon-enr-private-key is generated inside a hidden + folder .charon.{" "} +
  • +
  • + To view it, run ls -al in your terminal.{" "} +
  • +
  • + You can then copy the key to your ~/Downloads folder for + easy access by running{" "} + cp .charon/charon-enr-private-key ~/Downloads. This step + maybe a bit different for Windows.{" "} +
  • +
  • + Else, if you are on macOS, press Cmd + Shift + . to view + the .charon folder in the Finder application.{" "} +
  • +
+
+
+
+ +

Lighthouse

+
+
+ +

Downloading historical blocks

+
+

+ This means that Lighthouse is still syncing which will throw a lot of + errors down the line. Wait for the sync before moving further. +

+
+
+ +

+ Failed to request attester duties error +

+
+

+ Indicates there is something wrong with your Lighthouse beacon node. This + might be because the request buffer is full as your node is never starting + consensus since it never gets the duties. +

+
+
+ +

+ Not enough time for a discovery seach error +

+
+

+ This could be linked to a internet connection being too slow or relying on + a slow third-party service such as Infura. +

+
+
+
+ +

Beacon Node

+
+
+ +

+ Error communicating with Beacon Node API &{" "} + Error while connecting to beacon node event stream +

+
{" "} + This is likely due to Lighthouse not done syncing, wait and try again once + synced. Can also be linked to Teku keystore issue. +
+
+ +

Clock sync issues

+
{" "} + Either your clock server time is off, or you are talking to a remote beacon + client that is super slow (this is why we advise against using services like + Infura). +
+
+ +

+ My beacon node API is flaky with lots of errors and timeouts +

+
+ A good quality beacon node API is critical to validator performance. It is always + advised to run your own beacon node to ensure low latencies to boost validator + performance. +
+
+ Using 3rd party services like Infura's beacon node API has significant + disadvantages since the quality is often low. Requests often return 500s or + timeout (Charon times out after 2s). This results in lots of warnings and + errors and failed duties. Running a local beacon node is always preferred. + We are not yet considering increasing the 2s timeout since that can have + knock-on effects. +
+
+
+ +

Charon

+
+
+ +

+ Attester failed in consensus component error +

+
{" "} + The required number of operators defined in your cluster-lock file is + probably not online to sign successfully. Make sure all operators are + running the latest version of Charon. To check if some peers are not online:{" "} + + {" "} + docker logs charon-distributed-validator-node-charon-1 2>&1 | grep 'absent'{" "} + +
+
+ +

+ Load private key error +

+
{" "} + Make sure you have successfully run a DKG before running the node. The key + should be created and placed in the right directory during the ceremony. + Also, make sure you are working in the right directory:{" "} + charon-distributed-validator-node. +
+
+ +

+ Failed to confirm node connection error +

+
{" "} + Wait for Teku & Lighthouse sync to be complete. +
+
+ +

+ Reserve relay circuit: reservation failed error +

+
+ RESERVATION_REFUSED is returned by the libp2p relay when some maximum + limit has been reached. This is most often due to "maximum reservations per IP/peer". + This is when your Charon node is restarting or in some error loop and constantly + attempting to create new relay reservations reaching the maximum. +
+
+ To fix this error, stop your Charon node for 30mins before restarting it. + This should allow the relay enough time to reset your IP/peer limits and + should then allow new reservations. This could also be due to the relay + being overloaded in general, so reaching a server wide "maximum connections" + limit. This is an issue with relay scalability and we are working in a long + term fix for this. +
+
+ +

+ Error opening relay circuit: NO_RESERVATION error +

+
+ Error opening relay circuit NO_RESERVATION (204) indicates the peer + isn't connected to the relay, so the the Charon client cannot connect to the + peer via the relay. That might be because the peer is offline or the peer is + configured to connect to a different relay. +
+
+ To fix this error, ensure the peer is online and configured with the exact + same --p2p-relays flag. +
+
+ +

+ Couldn't fetch duty data from the beacon node error +

+
+ msgFetcher indicates a duty failed in the fetcher component when + it failed to fetch the required data from the beacon node API. This indicates + a problem with the upstream beacon node. +
+
+ +

+ Couldn't aggregate attestation due to failed attester duty{" "} + error +

+
+ msgFetcherAggregatorNoAttData indicates an attestation aggregation + duty failed in the fetcher component since it couldn't fetch the prerequisite + attestation data. This indicates the associated attestation duty failed to obtain + a cluster agreed upon value. +
+
+ +

+ + Couldn't aggregate attestation due to insufficient partial v2 + committee subscriptions + {" "} + error +

+
+ msgFetcherAggregatorZeroPrepares indicates an attestation aggregation + duty failed in the fetcher component since it couldn't fetch the prerequisite + aggregated v2 committee subscription. This indicates the associated prepare aggregation + duty failed due to no partial v2 committee subscription submitted by the cluster + validator clients. +
+
+ +

+ + Couldn't aggregate attestation due to failed prepare aggregator duty + {" "} + error +

+
+ msgFetcherAggregatorFailedPrepare indicates an attestation aggregation + duty failed in the fetcher component since it couldn't fetch the prerequisite + aggregated v2 committee subscription. This indicates the associated prepare aggregation + duty failed. +
+
+ +

+ + Couldn't propose block due to insufficient partial randao signatures + {" "} + error +

+
+ msgFetcherProposerFewRandaos indicates a block proposer duty failed + in the fetcher component since it couldn't fetch the prerequisite aggregated + RANDAO. This indicates the associated randao duty failed due to insufficient + partial randao signatures submitted by the cluster validator clients. +
+
+ +

+ + Couldn't propose block due to zero partial randao signatures + {" "} + error +

+
+ msgFetcherProposerZeroRandaos indicates a block proposer duty failed + in the fetcher component since it couldn't fetch the prerequisite aggregated + RANDAO. This indicates the associated randao duty failed due to no partial randao + signatures submitted by the cluster validator clients. +
+
+ +

+ Couldn't propose block due to failed randao duty error +

+
+ msgFetcherProposerZeroRandaos indicates a block proposer duty failed + in the fetcher component since it couldn't fetch the prerequisite aggregated + RANDAO. This indicates the associated randao duty failed. +
+
+ +

+ Consensus algorithm didn't complete error +

+
+ msgConsensus indicates a duty failed in consensus component. This + could indicate that insufficient honest peers participated in consensus or p2p + network connection problems. +
+
+ +

+ Signed duty not submitted by local validator client error +

+
+ msgValidatorAPI indicates that partial signatures were never submitted + by the local validator client. This could indicate that the local validator client + is offline, or has connection problems with Charon, or has some other problem. + See validator client logs for more details. +
+
+ +

+ + Bug: partial signature database didn't trigger partial signature + exchange + {" "} + error +

+
+ msgParSigDBInternal indicates a bug in the partial signature database + as it is unexpected. +
+
+ +

+ No partial signatures received from peers error +

+
+ msgParSigEx indicates that no partial signature for the duty was + received from any peer. This indicates all peers are offline or p2p network connection + problems. +
+
+ +

+ + Insufficient partial signatures received, minimum required threshold + not reached + {" "} + error +

+
+ msgParSigDBThreshold indicates that insufficient partial signatures + for the duty were received from peers. This indicates problems with peers or p2p + network connection problems. +
+
+ +

+ + Bug: threshold aggregation of partial signatures failed due to + inconsistent signed data + {" "} + error +

+
+ msgSigAgg indicates that BLS threshold aggregation of sufficient + partial signatures failed. This indicates inconsistent signed data. This indicates + a bug in Charon as it is unexpected. +
+
+ +

+ + Existing private key lock file found, another charon instance may be + running on your machine + {" "} + error +

+
+ When you turn on the --private-key-file-lock option in Charon, it + checks for a special file called the private key lock file. This file has the + same name as the ENR private key file but with a .lock extension. + If the private key lock file exists and is not older than 5 seconds, Charon won't + run. It doesn't allow running multiple Charon instances with the same ENR private + key. If the private key lock file has a timestamp older than 5 seconds, Charon + will replace it and continue with its work. If you're sure that no other Charon + instances are running, you can delete the private key lock file. +
+
+ +

+ + Validator api 5xx response: mismatching validator client key share + index, Mth key share submitted to Nth charon peer + {" "} + error +

+
+

+ The issue revolves around an invalid setup or deployment, where the + validators' private key shares don't match the ENR private key. There may + have been a mix-up during deployment, leading to a mismatching validator + client key share index. +

+

For example:

+
    +
  • +

    + Imagine node N is Alice, and node M is Bob, the error would read:{" "} + + mismatching validator client key share index, Bob's key share + submitted to Alice's charon node + + . +

    +
  • +
  • + Bob's private key share(s) are imported to a VC that is connected to + Alice's Charon node. This is an invalid setup/deployment. Alice's Charon + node should only be connected to Alice's VC. +
  • +
  • + Check the partial public key shares of each node inside + cluster-lock.json and see that matches with the public key inside{" "} + node(num)/validator_keys/keystore-0.json. +
  • +
+
+
+
+ +

Teku

+
+
+ +

+ Teku keystore file error{" "} +

+
{" "} + Teku sometimes logs an error which looks like{" "} + + Keystore file /opt/charon/validator_keys/keystore-0.json.lock already in + use + + . This can be solved by deleting the file(s) ending with .lock in + the folder .charon/validator_keys. It is caused by an unsafe shut + down of Teku (usually by double pressing Ctrl+C to shutdown containers + faster). +
+
+
+ +

Grafana

+
+
+ +

+ {" "} + How to fix the Grafana dashboard?{" "} +

+
{" "} + Sometimes, Grafana dashboard doesn't load any data first time around. You + can solve this by following the steps below:{" "} +
    +
  • Click the Wheel Icon > Datasources.
  • +
  • Click prometheus.
  • +
  • + Change the "Access" field from Server (default) to{" "} + Browser. Press "Save & Test". It should fail.{" "} +
  • +
  • + Change the "Access" field back to Server (default) and + press "Save & Test". You should be presented with a green success icon + saying "Data source is working" and you can return to the dashboard + page.{" "} +
  • +
+
+
+ +

+ N/A & No data in validator info panel +

+
{" "} + Can be linked to a Teku keystore issue. +
+
+
+ +

Prometheus

+
+
+ +

+ Unauthorized: authentication error: invalid token +

+
{" "} + You can ignore this error unless you have been contacted by the Obol Team + with monitoring credentials. In that case, follow{" "} + Getting Started Monitoring your Node in + our advanced guides. It does not affect cluster performance or prevent the + cluster from running. +
+
+
+ +

Docker

+
+
+ +

+ {" "} + How to fix permission denied errors?{" "} +

+
{" "} + Permission denied errors can come up in a variety of manners, particularly + on Linux and WSL for Windows systems. In the interest of security, the + charon docker image runs as a non-root user, and this user often does not + have the permissions to write in the directory you have checked out the code + to. This can generally be fixed with some of the following:{" "} +
    +
  • + Running docker commands with sudo, if you haven't{" "} + + setup docker to be run as a non-root user + + .{" "} +
  • +
  • + Changing the permissions of the .charon folder with the + commands:{" "} +
  • +
      +
    • + mkdir .charon (if it doesn't already exist); +
    • +
    • + sudo chmod -R 666 .charon. +
    • +
    +
+
+
+ +

+ {" "} + I see a lot of errors after running docker compose up +

+
{" "} + It's because both Geth and Lighthouse start syncing and so there are + connectivity issues among the containers. Simply let the containers run for + a while. You won't observe frequent errors when Geth finishes syncing. You + can also add a second beacon node endpoint for something like Infura by + adding a comma separated API URL to the end of{" "} + CHARON_BEACON_NODE_ENDPOINTS in the docker-compose.yml. +
+
+ +

+ {" "} + How do I fix the plugin "loki" not found error? +

+
{" "} + If you get the following error when calling docker compose up: +
+ + Error response from daemon: error looking up logging plugin loki: plugin + "loki" not found + + .
+ Then it probably means that the Loki docker driver isn't installed. In that + case, run the following command to install loki: +
+ + docker plugin install grafana/loki-docker-driver:latest --alias loki + --grant-all-permissions + + . +
+
+
+ +

Relay

+
+
+ +

+ + {" "} + Resolve IP of p2p external host flag: lookup replace.with.public.ip.or.hostname: + no such host{" "} + {" "} + error +

+
{" "} + Replace replace.with.public.ip.or.hostname in the + relay/docker-compose.yml with your real public IP or DNS hostname. +
+
+ +

+ Timeout resolving bootnode ENR: context deadline exceeded {" "} + error +

+
{" "} + The relay you are trying to connect to your peers via is offline or + unreachable. +
+
+
+ +

Lodestar

+
+
+ +

+ warn: Potential next epoch attester duties reorg error +

+
{" "} + Lodestar logs these warnings because Charon is not able to return proper{" "} + dependent_root value in getAttesterDuties API + response whenever Lodestar calls this API. This is because Charon uses{" "} + go-eth2-client for all the beacon API calls and it doesn't + provide dependent_root value in responses. We have reported + this to them{" "} + here. +
+
diff --git a/versioned_docs/version-v1.2.0/adv/troubleshooting/test_command.md b/versioned_docs/version-v1.2.0/adv/troubleshooting/test_command.md new file mode 100644 index 0000000000..014a944efd --- /dev/null +++ b/versioned_docs/version-v1.2.0/adv/troubleshooting/test_command.md @@ -0,0 +1,161 @@ +--- +sidebar_position: 3 +description: Troubleshoot issues spotted by the test command +--- + +# Test Commands + +This page aims to give guidance on the causes, and potential for troubleshooting or improvement, of failed tests or low test scores from the [Charon Test commands](../../run/prepare/test-command.mdx). + +## Peers + +### Charon Peers + +#### Ping + +- Peers might have not started their nodes or are not reachable. + +#### PingMeasure + +- Peer might be too far away (geographically) from you. +- If the connection to the peer is indirect, the route is from your node, to the relay, to the peer. Meaning you are measuring the travel time from you to the relay, and from the relay to the peer: (your node -> relay -> peer). This means, even if your peer's node is right next to yours, if the connection is being transmitted through a relay far away, the latency between your nodes might be too high to be effective. +- Your general network latency to the public internet might be high. Verify with the [`charon test infra`](../../run/prepare/test-command.mdx#test-machine-and-network-performance) tests. +- If the connection to the peer is indirect, there is a potential that the relay might be overloaded or under-resourced, consider adding [alternative relays](../../adv/security/risks.md#risk-obol-hosting-the-relay-infrastructure), or preferably [opening charon's p2p port](../../learn/charon/networking.mdx#libp2p-relays-and-peer-discovery) to the internet to establish direct peer to peer connections. + +#### PingLoad + +Same causes as PingMeasure test apply here. + +#### DirectConn + +- Your or your peer's port might not be publicly exposed. 
+- Your or your peer's port might be behind a firewall. +- Your or your peer's port might be behind a strict NAT gateway. + +### Charon Relays + +#### PingRelay + +- Relay might be down or uncontactable for other reasons. + +#### PingMeasureRelay + +- Relay might be under heavy load. +- Your network latency might be high. Verify with the `charon test infra` tests. + +### Self + +#### Libp2pTCPPortOpenTest + +- There might be another process running on the designated port (tcp/3610 by default). +- The process might have died. + +## Beacon + +#### Ping + +- Beacon node might not be started or is not reachable. + +#### PingMeasure + +- Beacon node might be too far away (geographically) from you. +- Your network latency might be high. Verify with the `charon test infra` tests. + +#### Version + +- The beacon node version is not compatible with charon. + +#### IsSynced + +- Beacon node is not synced to the network. + +#### PeerCount + +- Beacon node does not have enough peers. This may result in slower fetching and broadcasting of slots and duties. + +#### PingLoad + +This is a load test, to enable it add the `--load-test` flag. + +Same causes as PingMeasure test apply here. + +#### Simulation + +This is a load test, to enable it add the `--load-test` flag. + +Same causes as PingMeasure test apply here and additionally: + +- The infrastructure on which the beacon node runs (amount of RAM, disk IOPS) might not be enough to handle the number of simulated validators supplied in this test. + +## Validator + +#### Ping + +- Validator client might not be started or is not reachable. + +#### PingMeasure + +- Validator client might be too far away (geographically) from the charon client. Generally a low latency between a validator client and its charon client is important for timely signing. + +#### PingLoad + +Same causes as PingMeasure test apply here. + +## MEV + +#### Ping + +- MEV relay might not be started or is not reachable. 
+ +#### PingMeasure + +- MEV relay might be too far away (geographically) from you. +- Your network latency might be high. Verify with the `charon test infra` tests. + +#### CreateBlock + +Same causes as PingMeasure test apply here and additionally: + +- MEV relay might be too slow in block production. + +#### CreateMultipleBlocks + +Same causes as CreateBlock test apply here. + +## Infra + +#### DiskWriteSpeed + +- Read more in our [Deployment Best Practices](../../run/prepare/deployment-best-practices#hardware-specifications). + +#### DiskWriteIOPS + +- Read more in our [Deployment Best Practices](../../run/prepare/deployment-best-practices#hardware-specifications). + +#### DiskReadSpeed + +- Read more in our [Deployment Best Practices](../../run/prepare/deployment-best-practices#hardware-specifications). + +#### DiskReadIOPS + +- Read more in our [Deployment Best Practices](../../run/prepare/deployment-best-practices#hardware-specifications). + +#### AvailableMemory + +- Your available memory (RAM) is not enough to run Charon. The minimum available memory should be 2GB, the recommended available memory is 4GB. Note that this test is a best estimate, as memory availability can be hard to predict, particularly if the command is run in a virtualised environment (i.e.: a Docker container). + +#### TotalMemory + +- Your total memory (RAM) may not be enough to run a full validating node. The recommended minimum total memory is 16GB. Specialised, or optimised deployments can use less RAM than the recommended minimum, but may require some monitoring to assert sufficient stability and performance. Read more in our [Deployment Best Practices](../../run/prepare/deployment-best-practices#hardware-specifications) + +#### InternetLatency + +- Your internet latency to the nearest server is too high. Latency is expected to be at least less than 50ms and at best less than 20ms. 
+ +#### InternetDownloadSpeed + +- Your internet download speed from the nearest test server is too low. Download speed is expected to be at least above 10Mb/s and at best above 50Mb/s. + +#### InternetUploadSpeed + +- Your internet upload speed to the nearest test server is too low. Upload speed is expected to be at least above 10Mb/s and at best above 50Mb/s. diff --git a/versioned_docs/version-v1.2.0/gov/_category_.json b/versioned_docs/version-v1.2.0/gov/_category_.json new file mode 100644 index 0000000000..3e0c2c3cce --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/_category_.json @@ -0,0 +1,7 @@ +{ + "label": "COMMUNITY & GOVERNANCE", + "position": 4, + "collapsed": false, + "collapsible": false, + "className": "menuSection" +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/gov/community/_category_.json b/versioned_docs/version-v1.2.0/gov/community/_category_.json new file mode 100644 index 0000000000..f3dffd1ffe --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/community/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Community", + "position": 2, + "collapsed": true +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/gov/community/staking-masters.md b/versioned_docs/version-v1.2.0/gov/community/staking-masters.md new file mode 100644 index 0000000000..26acfa86c1 --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/community/staking-masters.md @@ -0,0 +1,26 @@ +--- +description: Information about the Staking Mastery Program +sidebar_position: 1 +--- + +# Staking Mastery Program + +Information about the Staking Mastery Program can be found at https://squadstaking.com/mastery. + +## Achieving staking mastery + +The Staking Mastery program is a carefully curated cohort based program designed to empower and promote individuals who are passionate about advancing staking adoption through research, development and/or education. 
+ +![Staking Master](/img/StakingMaster.png) + +In ancient Greece, masters were esteemed for their expertise and their role as mentors. A master would guide an apprentice through rigorous training and intellectual development. They often performed research, led workshops, or built entire guilds. This master-apprentice relationship was fundamental to the transmission of skills and knowledge in ancient Greek society. + +We're empowering the next generation of masters, the Ethereum Staking Masters. + +## How does it work? + +1. **Apply:** For those who are passionate about advancing Ethereum staking adoption through research, development and/or education. +2. **Interview:** Outstanding applicants will be interviewed to discuss their unique skills and how those can be best applied to advancing Ethereum staking adoption. +3. **Lead:** Selected Staking Masters will lead a project of their choosing for the length of their cohort, with support and recognition from DV Labs. + +Apply now at [squadstaking.com/mastery](https://squadstaking.com/mastery)! \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/gov/community/techne.md b/versioned_docs/version-v1.2.0/gov/community/techne.md new file mode 100644 index 0000000000..0848af6895 --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/community/techne.md @@ -0,0 +1,119 @@ +--- +description: Information about the Techne Credential Program. +sidebar_position: 1 +--- + +# Techne + +Information about the Techne Credential Program can be found at https://squadstaking.com/techne. + +## Welcome to the Obol Network Techne Credential 👋 + +At Obol Network, we believe in empowering our community by providing them with the tools, knowledge, and recognition they deserve. The credential is designed to provide an on-chain attestation that identifies, acknowledges, and elevates individuals who demonstrate proven experience, knowledge, and commitment within the Distributed Validators domain. 
+ +Drawing inspiration from the rich heritage of Ancient Greece, the name "Techne" reflects ideas of art, skill, or craft, representing technical mastery. Today, Techne embodies our vision to empower and uplift individuals who not only showcase technical expertise but also actively contribute to strengthening the staking ecosystem. + +## Why Earn the Credential? + +Each Obol Techne Credential is a verifiable, non-transferable NFT credential from Obol that proves your knowledge and experience operating Obol DVs. This on-chain attestation can then be used to showcase your experience, whether it’s to the Obol Core Team, other Obol community members, or the broader staking ecosystem. The credentials are defined in different tiers to ensure a progression path towards deeper expertise while offering an accessible entry point. + +![Obol vs others table](/img/Technes.png) + +### Techne Credential Benefits + +The goal of the Obol Techne Credential is to give community validators more opportunities to become node operators. Today, many liquid staking protocols and other staking services are looking to build community-focused, permissionless, and more decentralized node operator sets. However, to be considered in those programs, validators must have proven experience and demonstrated ability to run high-performing nodes. The key benefit of the Obol Techne Credential is to give every validator a way to prove their knowledge and experience with Obol DVs. + +- **Recognition**: Receive a verifiable non-transferable NFT to prove and showcase your knowledge and experience running distributed validators. +- **Opportunities**: Credentialed individuals have proven experience running distributed validators, providing a path to access delegated stake in programs such as EtherFi’s Operation Solo Staker, Lido’s SimpleDVT Module and many more. 
+ +## Program Overview + +### Eligibility +The Obol Techne Credential is open to all community members interested in gaining knowledge and experience in running distributed validators. + +### Credentialing Journey + +The credentialing journey currently takes place in waves. Each wave lasts around 8 weeks and consists of a preparation period called the *Learning Phase*, followed by a hands-on experience period, called *Experience*, during which Obol monitors performance. Before the start of a wave, you already have the opportunity to create your squad. Credentials are awarded at the end of the wave based on a snapshot taken that verifies average performance during the Experience phase. + +Also, it's important to understand the concept of Credential Tiers. Currently, there are three: the Base Credential, the Bronze Credential and the Silver Credential. Base and Bronze are earned on testnet, with Silver earned on mainnet. + +### Learning Phase & Knowledge Assessment + +During each wave of our program, you will have the opportunity to join live training sessions on our Discord. We will teach the basics of Ethereum Proof of Stake and Obol Distributed Validators (DVs), and will demonstrate how to run your first DV. There will also be a training session where you can ask questions and receive answers directly from experienced community members. Head to Discord to see the full agenda of the current wave or next wave. + +Also, don’t forget to thoroughly read the [comprehensive documentation](https://docs.obol.tech/docs/int/Overview)! + +### Practical Experience + +To obtain your Obol Techne Credential, you will need to demonstrate your abilities to **set up, run, and maintain a DV cluster for multiple weeks**. For Base and Bronze, this is around 4 weeks. For Silver, this is around 8 weeks. This is the essence of the Techne credential, to allow you to showcase your experience in an honest and verifiable manner on-chain. 
+ +### Performance Requirements + +You not only need to setup, run and maintain your DV, but also achieve **high performance.** Distributed Validators have been shown to outperform traditional validators, while providing much more [benefits and advantages](https://blog.obol.tech/what-is-dvt-and-how-does-it-improve-staking-on-ethereum/). + +The performance requirements are different depending on the different tiers. + +- For the **Base Credential**, you will need to run a DV cluster for 4 weeks with a performance near or above the [network average](https://grafana.monitoring.gcp.obol.tech/d/adgym07d8ak1sf/techne-credentials?orgId=6) (open the link in incognito mode if the link does not work). We will take into account the overall performance of the cluster, not the individual performances of the operators. +- For the **Bronze Credential** , you will need to create and manage 50 validators for 4 weeks with a performance equal to or above the [average of the Liquid Staking Providers](https://grafana.monitoring.gcp.obol.tech/d/adgym07d8ak1sf/techne-credentials?orgId=6) (index composed of Lido, RocketPool, Coinbase Cloud, StakeWise and EtherFi). We will also take into account the individual performance of the operators (the exact requirements will be announced soon). +- For the **Silver Credential,** you will need to create and manage 1 or more validators for 8 months on mainnet, with performance at near or above the network average. We will take into account the overall performance of the cluster as well as the individual performance of the operators. + +:::For Base + Bronze (testnet), if you require delegate hETH, we ask you to set the address 0x17E6F6270A101dc7687Cc9899889819EeAF8253f as the withdrawal recipient. We will not activate validators that have not done this. At the end of the wave, you will only be eligible for the Credentials after we receive the Holesky ETH back. 
+::: + +### Performance Monitoring + +To track and verify the performance of your DV, you and your squad mates are required to properly setup a monitoring credential. You can learn more about setting up your monitoring credential [here](../../run/start/obol-monitoring.md). + +We are proud to share with you our [Techne Public Dashboard](https://grafana.monitoring.gcp.obol.tech/d/adgym07d8ak1sf/techne-credentials?orgId=6) (*open it in incognito mode if the link does not work*) which will allow you to track your performance and compare it to the requirements throughout your adventure. If the link does not work, open it in private browsing. + +Please note that **if you do not properly setup monitoring, you will not be eligible for any Credential**. + +### Claim your Credentials + +After a wave has ended and if you have *created*, *ran*, and successfully *exited* your cluster, you will have the right to your new Techne Credential. + +We will make an announcement on our Discord and Twitter when the credentials are available to claim. + +Base and Bronze will require you to manually claim, while Silver will be automatically airdropped. + +## Get Started* + +Start your journey without further delay. Please find here the various documents and information you will need to get started in the program: + +### [👉 Get Started Now](https://discord.com/invite/n6ebKsX46w) + +Additional Resources + +> [Quickstart Guide](https://docs.obol.tech/docs/start/quickstart_overview) +> +> +>  [Support Channel on Discord](https://discord.gg/obolnetwork) +> +> [Get Started Monitoring your Node](https://docs.obol.tech/docs/advanced/monitoring) +> + +## **Disclaimer** + +*Obol Network does not assume responsibility for any financial losses that may be incurred by individuals who choose to run on the Mainnet. Participants are advised to exercise due diligence and assess all risks associated with running on the Mainnet. 
Obol Network shall not be held liable for any damages, financial or otherwise, that may arise from participating in Mainnet operations.* + +## FAQ + +**It looks like there is no wave active right now, how can I earn a Techne Credential?** + +There are currently no active waves for earning Base or Bronze, but we plan to offer another wave in January of 2025. Currently, you still do have the ability to earn Silver. Please head to the [Obol Discord](https://discord.gg/obol) to learn more about earning a Silver Techne Credential. + +**It has been 24 hours since I filled out the form to receive my Holesky ETH, but I haven't received anything yet. Is this normal?** + +This is probably due to an unusually high number of requests. Please wait for up to 48 hours and reach out to us on Discord. Also, please note that we do not distribute any Holesky ETH on weekends. + +**Can I qualify for the program if I run all the nodes on a single machine?** + +This is not aligned with the principle of distributed validators (DVs). You must form your cluster (squad) with other humans using other machines. + +**Can I take part in the program if I’m running on Mainnet?** + +Yes, those running a DV on mainnet have the opportunity to earn the Silver Techne Credential. + +**I don’t have the Base Credential but I have enough Holesky ETH to run 50 validators, can I aim for the Bronze Techne?** + +Yes, you can. Please send a message on [Discord](https://discord.gg/obol) in the #techne-applicants channel explaining your desire to run for Bronze using your own Holesky ETH. 
\ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/gov/contribution/_category_.json b/versioned_docs/version-v1.2.0/gov/contribution/_category_.json new file mode 100644 index 0000000000..fdeb0483e4 --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/contribution/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Contribution & Feedback", + "position": 3, + "collapsed": true +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/gov/contribution/bug-report.md b/versioned_docs/version-v1.2.0/gov/contribution/bug-report.md new file mode 100644 index 0000000000..c5ee5884d4 --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/contribution/bug-report.md @@ -0,0 +1,60 @@ +--- +description: Filing a bug report +sidebar_position: 1 +--- + +# Filing a Bug Report + +Bug reports are critical to the rapid development of Obol. In order to make the process quick and efficient for all parties, it is best to follow some common reporting etiquette when filing to avoid double issues or miscommunications. + +## Checking if your issue exists + +Duplicate tickets are a hindrance to the development process and, as such, it is crucial to first check through Charon's existing issues to see if what you are experiencing has already been indexed. + +To do so, head over to the [issue page](https://github.com/ObolNetwork/charon/issues) and enter some related keywords into the search bar. This may include a sample from the output or specific components it affects. + +If searches have shown the issue in question has not been reported yet, feel free to open up a new issue ticket. + +## Writing quality bug reports + +A good bug report is structured to help the developers and contributors visualize the issue in the clearest way possible. It's important to be concise and use comprehensive language, while also providing all relevant information on-hand. 
Use short and accurate sentences without any unnecessary additions, and include all existing specifications with a list of steps to reproduce the expected problem. Issues that cannot be reproduced **cannot be solved**. + +If you are experiencing multiple issues, it is best to open each as a separate ticket. This allows them to be closed individually as they are resolved. + +An original bug report will very likely be preserved and used as a record and sounding board for users that have similar experiences in the future. Because of this, it is a great service to the community to ensure that reports meet these standards and follow the template closely. + +## The bug report template + +Below is the standard bug report template used by all of Obol's official repositories. + +```shell + + +## Expected Behavior + + +## Current Behavior + + +## Steps to Reproduce + +1. +2. +3. +4. +5. + +## Detailed Description + + +## Specifications + +Operating system: +Version(s) used: + +## Possible Solution + + +## Further Information + + + ## What is Charon? + + + + ## Charon explained + ``` + +#### Bold text + +Double asterisks `**` are used to define **boldface** text. Use bold text when the reader must interact with something displayed as text: buttons, hyperlinks, images with text in them, window names, and icons. + +```markdown +In the **Login** window, enter your email into the **Username** field and click **Sign in**. +``` + +#### Italics + +Underscores `_` are used to define _italic_ text. Style the names of things in italics, except input fields or buttons: + +```markdown +Here are some American things: + +- The _Spirit of St Louis_. +- The _White House_. +- The United States _Declaration of Independence_. 
+ +``` + +Quotes or sections of quoted text are styled in italics and surrounded by double quotes `"`: + +```markdown +In the wise words of Winnie the Pooh _"People say nothing is impossible, but I do nothing every day."_ +``` + +#### Code blocks + +Tag code blocks with the syntax of the core they are presenting: + +````markdown + ```javascript + console.log(error); + ``` +```` + +#### List items + +All list items follow sentence structure. Only _names_ and _places_ are capitalized, along with the first letter of the list item. All other letters are lowercase: + +1. Never leave Nottingham without a sandwich. +2. Brian May played guitar for Queen. +3. Oranges. + +List items end with a period `.`, or a colon `:` if the list item has a sub-list: + +1. Charles Dickens novels: + 1. Oliver Twist. + 2. Nicholas Nickelby. + 3. David Copperfield. +2. J.R.R Tolkien non-fiction books: + 1. The Hobbit. + 2. Silmarillion. + 3. Letters from Father Christmas. + +##### Unordered lists + +Use the dash character `-` for un-numbered list items: + +```markdown +- An apple. +- Three oranges. +- As many lemons as you can carry. +- Half a lime. +``` + +#### Special characters + +Whenever possible, spell out the name of the special character, followed by an example of the character itself within a code block. + +```markdown +Use the dollar sign `$` to enter debug-mode. +``` + +#### Keyboard shortcuts + +When instructing the reader to use a keyboard shortcut, surround individual keys in code tags: + +```shell +Press `ctrl` + `c` to copy the highlighted text. +``` + +The plus symbol `+` stays outside of the code tags. + +### Images + +The following rules and guidelines define how to use and store images. + +#### Storage location + +All images must be placed in the `/static/img` folder. For multiple images attributed to a single topic, a new folder within `/img/` may be needed. 
+ +#### File names + +All file names are lower-case with dashes `-` between words, including image files: + +```text +concepts/ +├── content-addressed-data.md +├── images +│   └── proof-of-spacetime +│   └── post-diagram.png +└── proof-of-replication.md +└── proof-of-spacetime.md +``` + +_The framework and some information for this was forked from the original found on the [Filecoin documentation portal](https://docs.filecoin.io)_ diff --git a/versioned_docs/version-v1.2.0/gov/contribution/feedback.md b/versioned_docs/version-v1.2.0/gov/contribution/feedback.md new file mode 100644 index 0000000000..4ee477e502 --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/contribution/feedback.md @@ -0,0 +1,12 @@ +--- +description: Feedback for us +sidebar_position: 3 +--- + + +# Feedback + +If you have followed our quickstart guides, and whether you succeeded or failed at running the distributed validator successfully, we would like to hear your feedback on the process and where you encountered difficulties. + +- Please let us know by joining and posting on our [Discord](https://discord.gg/n6ebKsX46w). +- Also, feel free to add issues to our [GitHub repos](https://github.com/ObolNetwork). 
diff --git a/versioned_docs/version-v1.2.0/gov/governance/_category_.json b/versioned_docs/version-v1.2.0/gov/governance/_category_.json new file mode 100644 index 0000000000..bcc42463e7 --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/governance/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Governance", + "position": 1, + "collapsed": true +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/gov/governance/collective.md b/versioned_docs/version-v1.2.0/gov/governance/collective.md new file mode 100644 index 0000000000..d62e87bc6c --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/governance/collective.md @@ -0,0 +1,54 @@ +--- +sidebar_position: 1 +description: Obol Collective Overview +--- + +# Collective Overview + +## Purpose + +The Obol Collective’s governance system has two primary goals: + +1. **Resource allocation.** Allocate resources effectively to support the Collective’s vision and grow the Obol Collective's sustainable value. Long-term vision may sometimes conflict with short-term value creation; thus, governance requires a blend of short-term and long-term thinking to allocate the token treasury and protocol revenue effectively. +2. **Capture resistance.** Governance plays a key role in securing the anti-capture and censorship resistance of the Obol Collective. Governance should: + 1. make it possible for operations to continue over the long term without reliance on any individual entity; + 2. prevent any one entity or small group of entities from being able to control or censor. + +## Overview: The Obol Token House and RAF + +Two houses govern the Obol Collective: the Token House and the Obol RAF. + +In the **Token House,** OBOL Token holders are responsible for submitting, deliberating, and voting on governance proposals using the Governance Portal. Token holders can delegate their OBOL Token voting power to their own address or an eligible third party. Addresses with delegated voting power are called “Delegates”. 
+ +In the **Obol Retroactive Fund (RAF)**, OBOL Token Delegates are responsible for allocating funds within the RAF to projects and teams that provide value to the Obol Collective. + +All OBOL holders and Delegates are expected to exercise their authority responsibly and follow the Delegate [Rules of Engagement](https://community.obol.org/t/delegates-rules-of-engagement/206) and the general [Code of Conduct](https://community.obol.org/t/code-of-conduct-for-discussion-forum/205) for the forum. + +![Goverance Houses](/img/GovernanceHouses.png) + +## The Security Council + +The Security Council is a committee of multi-sig wallet signers with the power to perform certain emergency actions as delegated to it by the Obol Association. + +The Security Council can execute any software upgrade or perform other emergency actions without delay to respond to a security emergency, should one arise. The Security Council must not use its power to perform Emergency Actions except in a true security emergency, such as a critical vulnerability that could significantly compromise the Obol Collective. + +After taking any Emergency Action, the Security Council must issue a full transparency report (at an appropriate time after the security emergency has passed) explaining what was done and why such action was justified. + + +## Administration and Implementation + +In all cases, Obol Collective governance is intended to be carried out in a manner consistent with the Delegate [Rules of Engagement](https://community.obol.org/t/delegates-rules-of-engagement/206) and the general [Code of Conduct](https://community.obol.org/t/code-of-conduct-for-discussion-forum/205) for the forum. The Obol Association will steward this process as described below, with the goal of increasingly decentralising its role over time. + +The Obol Association, via its governance administrators, will facilitate administration to ensure that anyone may participate thoughtfully in governance. 
Such administrative services may include: + +- Moderation of governance proposals to ensure they are validly submitted and voted upon; +- Removal of proposals that reasonably appear to be fraudulent, spam-oriented, defamatory, hateful, or otherwise inappropriate or inconsistent with the values of the Collective; +- Monitoring of votes, voting power, the votable token supply, and voting periods for purposes of determining whether quorums and approval thresholds are met or accurately reflected; +- Management of mutually contradictory or duplicate proposals that are submitted simultaneously or close to one another; +- Maintenance of the Governance & RAF Portal; +- Other tasks that the Obol Association deems appropriate in connection with the above. + +Approved governance proposals will be routed to the Obol Association for implementation. Upon receipt of an approved proposal or chosen RAF recipients, the Obol Association will determine whether the proposal is safe, consistent with the purposes of the Obol Collective, and capable of being implemented legally (including potential KYC requirements). + +- If it is, the Association will act diligently and in a commercially reasonable manner to consider the proposal for implementation. +- If it is not, the Association may, at its discretion, remove the proposal for resubmission or implement it with guardrails, coupled with an explanation. \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/gov/governance/contributions.md b/versioned_docs/version-v1.2.0/gov/governance/contributions.md new file mode 100644 index 0000000000..de06315b2b --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/governance/contributions.md @@ -0,0 +1,92 @@ +--- +sidebar_position: 6 +description: Earning Obol Contributions +--- + +# Obol Contributions Program + +*Our aim is to encourage continuous participation and sustained support for Ethereum’s consensus. 
The Obol Contributions program is therefore designed to be fair and transparent, without arbitrary gimmicks promoting short-term interest at the cost of long-term participation. Obol Contributions are meant to acknowledge all of the actors in the staking stack for their contribution to Ethereum’s decentralisation.* + +*Each Obol DV contributes 1% of the total staking reward it earns to the Obol Collective. This economic stream is the basis for recognized Contributions. By basing Contributions on staking rewards accumulated by validators (pubkeys), performance is factored in (i.e. effectiveness, uptime, etc.)* + +*Contributions are tracked in a centralised database off-chain, not on the blockchain as transferable or non-transferable tokens. Our [API endpoints](https://docs.obol.tech/api#tag/Address/operation/AddressController_getAddressContributions) enable users and protocols to query how much they have contributed. These endpoints are used to show individuals’ Contributions within the [Obol DV Launchpad](https://launchpad.obol.org) and partners’ staking platforms.* + +## What is the Obol Contributions Program? + +- An opportunity for every staker to participate in and get recognised for scaling and decentralising Ethereum by staking on Obol Distributed Validators (DVs). +- Stake deployed on Obol DVs will contribute 1% of staking rewards to the Obol Collective’s retroactive funding mechanism. +- Those contributions will be tracked and recognised, and serve as the basis for future governance and ownership in the Obol Collective. + +## How do I participate in the Obol Contributions Program? + +You can participate in the Obol Contributions Program by: + +1. **Running your own DV Cluster**: You can create and manage your own DV cluster using our [DV Launchpad](https://launchpad.obol.org), for example using a [DappNode](https://dappnode.com/). This allows you to directly participate in the network's decentralisation efforts and earn contributions. 
You can also visit our [Squad Staking](https://squadstaking.com) page for inspiration and to find squad members. + +2. **Staking through Staking Partners**: By participating in staking through our partners, your staking rewards will also contribute to the Obol Collective. For the current list of staking partners, please visit [obol.org/contributions](https://obol.org/contributions). + +## How can I track my contributions? + +If you’re staking through a partner product, their dashboard will display your contributions. If you’re running your own DV, your contributions will be displayed on the [DV Launchpad](https://launchpad.obol.org/cluster/list/). + +## What benefits do I get from contributing? + +Contributions will serve as the basis for future ownership and governance of the Obol Collective’s retroactive funding mechanism. + +## How are contributions calculated? + +Contributions are based on 1% of validator rewards, which are contributed to the Obol Collective’s “1% for Decentralization” retroactive fund. (retroactivefunding.obol.eth) Contributions are calculated daily and tracked off-chain. This means that the higher the effectiveness and uptime of your validators, the more rewards (and thus contributions) you will generate. + +If the total rewards \( R \) for a validator are split among operators with percentages \( p_1, p_2, ..., p_n \), the contributions \( C \) for each operator can be calculated as: + +Cᵢ = R × pᵢ × 0.01 × 1.01 + +where pᵢ is the split percentage for the i-th operator. We multiply by 1.01 to ensure that the full 1% contribution is accurately distributed among operators, compensating for the initial deduction of 1% from the total rewards. + +## I’m already running a mainnet Obol DV? What about me? + +If you are running an existing mainnet DV without the 1% split, you will not have tracked contributions and will need to deploy a new DV configuration to participate. + +## Wen Token? 
+ +Obol Contributions are not a promise or indication of any future plans for an Obol Token. They are purely a recognition of your contribution to the security, resiliency, and decentralisation of Ethereum consensus through the adoption of Obol DVs. + +## What is the 1% for Decentralization retroactive funding program? + +Users of Obol distributed validator (DV) clusters contribute 1% of their staking rewards to a retroactive funding program. These funds will reward projects and innovations which add value and drive impact towards Ethereum’s decentralisation. Read more on our blog or on obol.org/governance. + +## Do I need to run my own validator to participate? + +No, you can stake on DVs and track your contributions through the partners listed on [obol.org/contributions](https://obol.org/contributions). More partners are coming soon. + +## Can I withdraw my staked ETH at any time? + +There is no penalty for exiting your validator. You simply stop accruing Contributions. The specific withdrawal policy will depend on the staking protocol used. + +## What is the minimum amount of ETH needed to stake? + +The minimum amount varies by staking protocol, please refer to each partner to determine the minimum amount of stake required. + +## What happens if my validator has downtime? + +Since Contributions are tied to staking rewards, the amount of Contributions earned is impacted by on performance metrics like effectiveness and uptime. + +## Will my contributions be public? + +Yes, contributions can be viewed through the [Obol API](https://docs.obol.org/api#tag/Address/operation/AddressController_getAddressContributions), though specific visibility settings depend on the protocol. + +## How do I know my contributions are being counted accurately? + +Contributions are tracked based on validator rewards, and you can verify them through [our API](https://docs.obol.org/api#tag/Address/operation/AddressController_getAddressContributions). 
+ +## How can I increase my contributions? + +Contributions increase with higher stake, as well as higher performance and uptime of your validators. + +## What are the benefits of using distributed validators? + +Distributed validators increase the security, resiliency, and decentralisation of Ethereum. + +## How can I get support if I have issues? + +Support is available through Obol’s [Discord](https://discord.gg/obol) and partners’ support channels. \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/gov/governance/delegate-guide.md b/versioned_docs/version-v1.2.0/gov/governance/delegate-guide.md new file mode 100644 index 0000000000..46858b792c --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/governance/delegate-guide.md @@ -0,0 +1,57 @@ +--- +sidebar_position: 5 +description: Delegate Guide +--- + +# Delegate Guide + +## Delegate Rules of Engagement + +This document outlines the expectations, responsibilities, and values that should guide interactions within the Obol Collective governance community, including the Obol Forum, Discord, and working group meetings. This Code of Conduct will be reviewed periodically to incorporate feedback and adapt to governance needs. Any changes require community approval through the governance process. + +Please review these principles carefully to ensure alignment with the Obol Collective’s vision and commitment to a decentralized and sustainable ecosystem. + +Please read the rules of engagement on the forum [here](https://community.obol.org/t/delegates-rules-of-engagement/206). + +## **Governance Toolkit** + +These tools or their uses may change over time as governance evolves. For example, additional user interfaces dedicated to governance may be developed. Likewise, while voting currently takes place on-chain through the Governance Contract, some successful votes are administered and implemented by the Obol Association, which should not be the case indefinitely. 
+ +Please see the toolkit on the forum [here](https://community.obol.org/t/governance-toolkit/207). + +## Delegates’ Guide to the RAF + +### How to vote in the RAF: + +1. Visit the RAF portal at http://raf.obol.org. You will see information about the current or upcoming RAF round. + +![RAF Portal Homepage](/img/RAFPortalExplainer1.png) + +2. Click the “projects” tab on the top of the page. Here you see the projects applying to the current RAF round and that have been approved. + +![RAF Portal Projects Page](/img/RAFPortalExplainer2.png) + +3. Add projects to your Ballot after reviewing their impact. (To examine a project, click anywhere on the project card to open the project details page.) Then, click on ‘View Ballot’ to visit the Ballot Page. + +![RAF Portal Project Selection](/img/RAFPortalExplainer3.png) + +4. The Ballot Page shows the projects you have chosen. Enter the desired number of votes for each project, based on your total vote allocation. Click “submit ballot” and sign the transaction. + +![RAF Ballot Review](/img/RAFPortalExplainer4.png) + +5. Once the RAF round is over, you will be able to see the results by clicking on the "stats" button at the top of the portal. The voting power of each OBOL Token Delegate is proportional to the amount of OBOL tokens delegated to them. However, the funding results are calculated using quadratic funding, meaning that the square root of the votes is used to determine the final allocation. This approach ensures a broader distribution of funding across the Obol Collective, rather than allowing a small number of winners to dominate. + +![RAF Round Stats](/img/RAFPortalExplainer5.png) + +### How to evaluate projects + +Allocating funding is not a perfect process. You may not feel like an expert on a given project, or it may be challenging to directly compare projects’ impact. Here are some considerations to use throughout the process. + +- **Don’t fake expertise. 
You are voting for *you* — not for all of the Collective.** + You are not individually responsible for knowing everything about every Obol RAF category. Some delegates may be experts in education, while others may be deeply experienced in technical infrastructure. We urge you to share your expertise with others, and where necessary, relying on the expertise of other trusted contributors. + +- **Make sense together** + Feel free to engage with other contributors and the broader Obol Collective in evaluating project impact. + +- **Make holistic impact determinations, but when in doubt, don’t make assumptions.** + A combination of data and research should leave you qualified to make an informed decision about a given applicant. Still, there might be some projects whose impact argument is more subjective and hard to quantify (in the education category, for example). Use your gut in these situations, but don’t be *too* charitable with an applicant. If an impact statement seems like a stretch, it probably is. \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/gov/governance/obol-token.md b/versioned_docs/version-v1.2.0/gov/governance/obol-token.md new file mode 100644 index 0000000000..a59960fdd4 --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/governance/obol-token.md @@ -0,0 +1,24 @@ +--- +sidebar_position: 4 +description: The OBOL Token +--- + +# The OBOL Token + +The OBOL Token is central to the governance and operation of the Obol Collective. It serves multiple purposes that are essential to its functioning. + +### Token Utility + +1. **Token Governance:** OBOL Token holders delegate their voting power to delegates who participate in the Token House decision-making processes. This includes voting on proposals affecting the Obol Collective’s direction, upgrades, and funding allocations. Read more about the Token House [here](./token-house.md). + +2. 
**Obol Retroactive Funding (RAF):** Token holders delegate their voting power to Delegates, who vote on the projects eligible for retroactive funding. Read more about the Obol RAF [here](./raf.md). + +3. **Staking**: Plans are in place to build staking for the OBOL Token using the Tally Protocol, similar to UNI Token staking for the Unichain and ARB Token staking. Activation of staking for the OBOL Token is subject to governance approval. + 1. Staking OBOL Tokens through Tally’s staking module wraps them into a yield-bearing token, stOBOL, preserving governance rights while enabling use in DeFi. In this case, the yield comes from two sources: **token inflation** (new OBOL tokens issued by the protocol while maintaining the 500M cap) and **protocol revenue**. This incentivises using the staking mechanism while ensuring that governance power is retained. Read more on the Tally Protocol [here](https://tally.mirror.xyz/Drw-uvqhUnJLRxg32sV-sqKZ785-AO85FBaCYeXqxhA). + 2. Delegates who actively participate in governance can receive a **portion of the staking yield** as payment for their service. This creates a system where both individual stakers and governance delegates are rewarded, aligning incentives for robust participation. + +4. **Restaking**: Plans are in place to list the OBOL Token on restaking platform such as Eigenlayer and Symbiotic to allow AVSs and the Obol Collective to leverage the security provided by restaked OBOL Tokens. + 1. Restaking takes the stOBOL Tokens from Tally’s staking module described above and uses them in additional protocols, such as Symbiotic or EigenLayer. The yield here is protocol-specific, often derived from fees, rewards, or staking incentives within those secondary protocols. However, restaking introduces added risk, such as slashing, if the protocol’s conditions aren’t met, whereas staking does not have these risks. + 2. 
Activation of restaking for the stOBOL Token is subject to governance approval (on both the Obol Collective side and, if relevant, the restaking platform side).
+ +### Step 2: Application Registration + +Projects are invited to create an application on the RAF Portal. Any project or team can apply, but in the future, governance may decide that an application needs to be made by someone holding a minimum amount of OBOL tokens. + +To apply for the Obol RAF, projects can create an application on [raf.obol.org](http://raf.obol.org/) by following these steps: + +1. **Create an Application:** Fill out the application form using the RAF Portal. +2. **Describe Impact:** Specify the category of the project and its impact. + +### Step 3: Application Review + +A subset of OBOL Delegates appointed by the Obol Association reviews applications to ensure compliance with the application rules. + +The Obol Association will also review applications to filter out spam or applications that do not align with the mission of the Obol Collective. + +### Step 4: Voting + +OBOL Delegates vote on projects in proportion to their voting power. Here’s how it works: + +1. **Eligibility:** Every address that has been delegated voting power by OBOL Token holders can submit votes. +2. **Voting Process:** Delegates vote using the [raf.obol.org](http://raf.obol.org) app. Depending on the type of application being reviewed, a specific set of criteria should be used to judge a project's contributions to the Obol Collective. + +:::warning +The Obol Association will monitor votes to ensure contributors are not voting for projects they are directly involved in or where a substantial conflict of interest may be present. Delegates are expected to act in accordance with the Delegate [Rules of Engagement](https://community.obol.org/t/delegates-rules-of-engagement/206) and the general [Code of Conduct](https://community.obol.org/t/code-of-conduct-for-discussion-forum/205) for the forum. +::: + +### Step 5: Tallying Results + +The voting power of each OBOL Token Delegate is proportional to the amount of OBOL tokens delegated to them. 
However, the funding results are calculated using [quadratic funding](https://qf.gitcoin.co/), meaning that the square root of the votes is used to determine the final allocation. This approach ensures a broader distribution of funding across the Obol Collective, rather than allowing a small number of winners to dominate. + +### Step 6: Compliance + +The Obol Association will review the list of selected projects, adjust if necessary and collect information from them to distribute the grant legally compliantly (including completing KYC if required). + +### Step 7: Funding distribution + +The overall reward amount for the round is divided among the winning projects based on the delegates' vote and allocation according to the quadratic funding mechanism. + +### **Step 8: Community retrospective** + +After each round, The Obol Association will conduct a retrospective and gather community feedback. \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/gov/governance/token-house.md b/versioned_docs/version-v1.2.0/gov/governance/token-house.md new file mode 100644 index 0000000000..1776d5135d --- /dev/null +++ b/versioned_docs/version-v1.2.0/gov/governance/token-house.md @@ -0,0 +1,79 @@ +--- +sidebar_position: 2 +description: The Token House +--- + +# The Token House + +In the Token House, OBOL Token holders are responsible for submitting, deliberating, and voting on various Obol Collective governance proposals. They may do so by directly voting with their OBOL Tokens (by delegating the voting power of their OBOL tokens to their own address) or by delegating their OBOL Token voting power to an eligible third party. Addresses with delegated OBOL voting power are called “Delegates”. + +## Overview of the Obol Token House + +- Proposals are reviewed and voted on in three-week cycles. 
+- In the first week, anyone may draft a proposal of any type based on [this template](https://community.obol.org/t/proposal-template/208) and post it on the [Forum](https://community.obol.org/) with [Draft] in the title. Delegates, Obol Collective members, and the general public can provide feedback on proposals in the forum, which should be incorporated into the proposal using the author’s best judgment. +- Once your proposal has been approved by four top-100 Delegates (by voting power), add a link to your proposal to the Voting Cycle Roundup forum thread by the last day of Week 2, and update the title from [Draft] to [Final]. The proposal then moves on to Week 3: Voting. +- If your proposal is passed, the Obol Association will facilitate its administration, including distributing approved OBOL Tokens grants. +- If your proposal fails, you can make a new proposal in the next cycle, specifying how you have incorporated significant changes from your first proposal. + +![Token House Explainer](/img/TokenHouseExplainer.png) + +## Token House Governance Proposal Process + +Anyone can submit a proposal to the Token House by using the Governance Portal. Proposals are accepted or rejected using a voting process. A proposal must be one of the valid proposal types listed below and follow the voting process described here. + +All governance proposals go through a 3-week cycle. Each “week” runs from Thursday at 7:00 pm GMT until Wednesday at 7:00 pm GMT. + +### Valid Proposal Types + +All governance proposals must fall within one of the following categories: + +- Ecosystem grants +- Protocol upgrades +- Token inflation adjustment +- Treasury allocations +- Work agreements +- Veto on the Obol RAF operations + +### Weeks 1-2: Feedback and Review + +All proposal types should be posted to the Obol Forum for review. Proposal authors are expected to respond to feedback from Obol Collective members and Delegates. 
+ +Proposals should be: + +- Submitted as a new discussion thread on the [Governance Forum](http://community.obol.tech/) in the appropriate Proposal Category. +- Marked with [Draft] in the title. +- Formatted and contain information consistent with the [standard proposal template](https://community.obol.org/t/proposal-template/208). + +Before the end of Week 2, a governance administrator will create a Voting Cycle Roundup thread in the forum summarising all the proposals that meet the voting requirements for Week 3 Voting. + +For proposals to proceed to Week 3, four of the top 100 delegates by voting power must give explicit approval on the discussion thread. Delegates may signal approval by pasting the following comment on the proposal discussion thread: *”I am an Obol Delegate [link to your delegate commitment in your Tally profile] with sufficient voting power, and I believe this proposal is ready to move to a vote."* + +If a Delegate approves a proposal to move to a vote, it does not endorse that proposal. It simply signifies that they believe the proposal is ready to move to a vote. + +After receiving the required approvals, the author should update the proposal title from [Draft] to [Final] and add a link to their proposal in the Voting Cycle Roundup thread by the last day of Week 2 at 19:00 GMT. Authors should also include a summary of incorporated feedback as a comment on their proposal thread so future reviewers can understand the proposal’s progress. Proposal authors should include relevant links if feedback was gathered outside the Forum (e.g., on Discord). + +If a proposal author does not get explicit approval or wants more time for feedback, they should not include their proposal in the Voting Cycle Roundup thread. Instead, they should continue to seek community feedback and submit an updated proposal in the next voting cycle. 
+ +### Week 3: Voting + +During the third week, Delegates (including OBOL Token holders who have self-delegated) vote on proposals via the [Obol Governance Portal](https://vote.obol.org/). All proposals are included in voting if they were added to the Voting Cycle Roundup thread before the deadline and have the approval of four top-100 delegates. + +A governance proposal is **approved** if it satisfies two requirements: + +- **Quorum:** A minimum number of OBOL Token votes measured as *a % of the total votable OBOL Token supply, as of the start of the voting period.* “Votable supply” is the total amount of OBOL Tokens that have been delegated (including to oneself) and, therefore, can participate in voting. +- **Approval threshold:** The minimum *% of votes cast in support relative to the total number of votes.* + +A snapshot is taken at the start of each voting period to determine each delegate’s voting power. Voting is hosted on the Obol Governance Portal. + +If a proposal is submitted for a vote and does not pass, the proposal will not be executed. If a proposal author wishes to iterate on a rejected proposal, they should: + +1. Create a new proposal thread on the Forum. +2. Include a link to the first proposal that did not pass. +3. Clearly identify what has changed in the new proposal. + +## Implementation + +Approved governance proposals will be routed to the Obol Association for implementation. Upon receipt of an approved proposal, the Obol Association will determine whether the proposal is safe, consistent with the purposes of the Obol Collective, and capable of being implemented legally (including potential KYC requirements). + +- If it is, the Association will act diligently and in a commercially reasonable manner to consider the proposal for implementation. +- If it is not, the Association may, at its discretion, remove the proposal for resubmission or implement it with guardrails. 
\ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/guides/_category_.json b/versioned_docs/version-v1.2.0/guides/_category_.json new file mode 100644 index 0000000000..a3633cebdd --- /dev/null +++ b/versioned_docs/version-v1.2.0/guides/_category_.json @@ -0,0 +1,7 @@ +{ + "label": "BEGINNER GUIDES", + "position": 5, + "collapsed": false, + "collapsible": false, + "className": "menuSection" +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/guides/walkthroughs/_category_.json b/versioned_docs/version-v1.2.0/guides/walkthroughs/_category_.json new file mode 100644 index 0000000000..826aab8320 --- /dev/null +++ b/versioned_docs/version-v1.2.0/guides/walkthroughs/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Walkthroughs", + "position": 1, + "collapsed": true +} diff --git a/versioned_docs/version-v1.2.0/guides/walkthroughs/walkthrough-guides.md b/versioned_docs/version-v1.2.0/guides/walkthroughs/walkthrough-guides.md new file mode 100644 index 0000000000..93a8b4510b --- /dev/null +++ b/versioned_docs/version-v1.2.0/guides/walkthroughs/walkthrough-guides.md @@ -0,0 +1,10 @@ +--- +sidebar_position: 1 +description: Walkthrough guides. +--- + +# Walkthrough Guides + +This section contains walkthrough guides for beginner users. Some of these guides are specific to particular hardware or operating systems, and may not replace more general documentation. These guides contain more detailed step-by-step information which may be useful for beginner users with limited experience with Linux, validators, and other topics with which the primary guides assume some prior familiarity. + +Some of these guides are community-created, and may contain issues or omissions. 
diff --git a/versioned_docs/version-v1.2.0/learn/_category_.json b/versioned_docs/version-v1.2.0/learn/_category_.json new file mode 100644 index 0000000000..e2c3c6e4e0 --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/_category_.json @@ -0,0 +1,7 @@ +{ + "label": "LEARN", + "position": 1, + "collapsed": false, + "collapsible": false, + "className": "menuSection" +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/learn/charon/_category_.json b/versioned_docs/version-v1.2.0/learn/charon/_category_.json new file mode 100644 index 0000000000..08cbf7c7eb --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/charon/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Charon", + "position": 2, + "collapsed": true +} diff --git a/versioned_docs/version-v1.2.0/learn/charon/charon-cli-reference.md b/versioned_docs/version-v1.2.0/learn/charon/charon-cli-reference.md new file mode 100644 index 0000000000..418b8e0ddb --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/charon/charon-cli-reference.md @@ -0,0 +1,704 @@ +--- +description: A go-based middleware client for taking part in Distributed Validator clusters. +sidebar_position: 5 +--- + +# CLI Reference + +The following is a reference for Charon version [`v1.2.0`](https://github.com/ObolNetwork/charon/releases/tag/v1.2.0). Find the latest release on [our Github](https://github.com/ObolNetwork/charon/releases). + +The following are the top-level commands available to use. + +```markdown +charon --help +Charon enables the operation of Ethereum validators in a fault tolerant manner by splitting the validating keys across a group of trusted parties using threshold cryptography. 
+ +Usage: + charon [command] + +Available Commands: + alpha Alpha subcommands provide early access to in-development features + combine Combine the private key shares of a distributed validator cluster into a set of standard validator private keys + completion Generate the autocompletion script for the specified shell + create Create artifacts for a distributed validator cluster + dkg Participate in a Distributed Key Generation ceremony + enr Print the ENR that identifies this client + exit Exit a distributed validator. + help Help about any command + relay Start a libp2p relay server + run Run the charon middleware client + version Print version and exit + +Flags: + -h, --help Help for charon + +Use "charon [command] --help" for more information about a command. +``` + +## The `create` command + +The `create` command handles the creation of artifacts needed by Charon to operate. + +```markdown +charon create --help +Create artifacts for a distributed validator cluster. These commands can be used to facilitate the creation of a distributed validator cluster between a group of operators by performing a distributed key generation ceremony, or they can be used to create a local cluster for single operator use cases. + +Usage: + charon create [command] + +Available Commands: + cluster Create private keys and configuration files needed to run a distributed validator cluster locally + dkg Create the configuration for a new Distributed Key Generation ceremony using charon dkg + enr Create an Ethereum Node Record (ENR) private key to identify this charon client + +Flags: + -h, --help Help for create + +Use "charon create [command] --help" for more information about a command. +``` + +### Creating an ENR for Charon + +An `enr` is an Ethereum Node Record. It is used to identify this Charon client to its other counterparty Charon clients across the internet. 
+ +```markdown +charon create enr --help +Create an Ethereum Node Record (ENR) private key to identify this charon client + +Usage: + charon create enr [flags] + +Flags: + --data-dir string The directory where charon will store all its internal data. (default ".charon") + -h, --help Help for enr +``` + +### Create a full cluster locally + +The `charon create cluster` command creates a set of distributed validators locally; including the private keys, a `cluster-lock.json` file, and deposit data. This command should only be used for solo-operation of distributed validators. To run a distributed validator cluster with a group of operators, it is preferable to create these artifacts using the [DV Launchpad](../../learn/intro/launchpad.md) and the `charon dkg` command. That way, no single operator custodies all of the private keys to a distributed validator. + +:::warning +This command produces new distributed validator private keys or handles and splits pre-existing traditional validator private keys, please use caution and keep these private keys securely backed up and secret. +::: + +```markdown +charon create cluster --help +Creates a local charon cluster configuration including validator keys, charon p2p keys, cluster-lock.json and deposit-data.json file(s). See flags for supported features. + +Usage: + charon create cluster [flags] + +Flags: + --cluster-dir string The target folder to create the cluster in. (default "./") + --definition-file string Optional path to a cluster definition file or an HTTP URL. This overrides all other configuration flags. + --deposit-amounts ints List of partial deposit amounts (integers) in ETH. Values must sum up to exactly 32ETH. + --fee-recipient-addresses strings Comma separated list of Ethereum addresses of the fee recipient for each validator. Either provide a single fee recipient address or fee recipient addresses for each validator. + -h, --help Help for cluster + --insecure-keys Generates insecure keystore files. 
This should never be used. It is not supported on mainnet. + --keymanager-addresses strings Comma separated list of keymanager URLs to import validator key shares to. Note that multiple addresses are required, one for each node in the cluster, with node0's keyshares being imported to the first address, node1's keyshares to the second, and so on. + --keymanager-auth-tokens strings Authentication bearer tokens to interact with the keymanager URLs. Don't include the "Bearer" symbol, only include the api-token. + --name string The cluster name + --network string Ethereum network to create validators for. Options: mainnet, goerli, sepolia, holesky, gnosis, chiado. + --nodes int The number of charon nodes in the cluster. Minimum is 3. + --num-validators int The number of distributed validators needed in the cluster. + --publish Publish lock file to obol-api. + --publish-address string The URL to publish the lock file to. (default "https://api.obol.tech/v1") + --split-existing-keys Split an existing validator's private key into a set of distributed validator private key shares. Does not re-create deposit data for this key. + --split-keys-dir string Directory containing keys to split. Expects keys in keystore-*.json and passwords in keystore-*.txt. Requires --split-existing-keys. + --testnet-chain-id uint Chain ID of the custom test network. + --testnet-fork-version string Genesis fork version of the custom test network (in hex). + --testnet-genesis-timestamp int Genesis timestamp of the custom test network. + --testnet-name string Name of the custom test network. + --threshold int Optional override of threshold required for signature reconstruction. Defaults to ceil(n*2/3) if zero. Warning, non-default values decrease security. + --withdrawal-addresses strings Comma separated list of Ethereum addresses to receive the returned stake and accrued rewards for each validator. Either provide a single withdrawal address or withdrawal addresses for each validator. 
+``` + +### Creating the configuration for a DKG Ceremony + +This `charon create dkg` command creates a `cluster-definition.json` file used for the `charon dkg` command. + +```markdown +charon create dkg --help +Create a cluster definition file that will be used by all participants of a DKG. + +Usage: + charon create dkg [flags] + +Flags: + --deposit-amounts ints List of partial deposit amounts (integers) in ETH. Values must sum up to exactly 32ETH. + --dkg-algorithm string DKG algorithm to use; default, frost (default "default") + --fee-recipient-addresses strings Comma separated list of Ethereum addresses of the fee recipient for each validator. Either provide a single fee recipient address or fee recipient addresses for each validator. + -h, --help Help for dkg + --name string Optional cosmetic cluster name + --network string Ethereum network to create validators for. Options: mainnet, goerli, sepolia, holesky, gnosis, chiado. (default "mainnet") + --num-validators int The number of distributed validators the cluster will manage (32ETH staked for each). (default 1) + --operator-enrs strings [REQUIRED] Comma-separated list of each operator's Charon ENR address. + --output-dir string The folder to write the output cluster-definition.json file to. (default ".charon") + -t, --threshold int Optional override of threshold required for signature reconstruction. Defaults to ceil(n*2/3) if zero. Warning, non-default values decrease security. + --withdrawal-addresses strings Comma separated list of Ethereum addresses to receive the returned stake and accrued rewards for each validator. Either provide a single withdrawal address or withdrawal addresses for each validator. +``` + +## The `dkg` command + +### Performing a DKG Ceremony + +The `charon dkg` command takes a `cluster-definition.json` file that instructs Charon on the terms of a new distributed validator cluster to be created. 
Charon establishes communication with the other nodes identified in the file, performs a distributed key generation ceremony to create the required threshold private keys, and signs deposit data for each new distributed validator. The command outputs the `cluster-lock.json` file and key shares for each Distributed Validator created. + +```markdown +charon dkg --help +Participate in a distributed key generation ceremony for a specific cluster definition that creates +distributed validator key shares and a final cluster lock configuration. Note that all other cluster operators should run +this command at the same time. + +Usage: + charon dkg [flags] + +Flags: + --data-dir string The directory where charon will store all its internal data. (default ".charon") + --definition-file string The path to the cluster definition file or an HTTP URL. (default ".charon/cluster-definition.json") + -h, --help Help for dkg + --keymanager-address string The keymanager URL to import validator keyshares. + --keymanager-auth-token string Authentication bearer token to interact with keymanager API. Don't include the "Bearer" symbol, only include the api-token. + --log-color string Log color; auto, force, disable. (default "auto") + --log-format string Log format; console, logfmt or json (default "console") + --log-level string Log level; debug, info, warn or error (default "info") + --log-output-path string Path in which to write on-disk logs. + --no-verify Disables cluster definition and lock file verification. + --p2p-disable-reuseport Disables TCP port reuse for outgoing libp2p connections. + --p2p-external-hostname string The DNS hostname advertised by libp2p. This may be used to advertise an external DNS. + --p2p-external-ip string The IP address advertised by libp2p. This may be used to advertise an external IP. + --p2p-relays strings Comma-separated list of libp2p relay URLs or multiaddrs. 
(default [https://0.relay.obol.tech,https://2.relay.obol.dev,https://1.relay.obol.tech]) + --p2p-tcp-address strings Comma-separated list of listening TCP addresses (ip and port) for libP2P traffic. Empty default doesn't bind to local port therefore only supports outgoing connections. + --publish Publish the created cluster to a remote API. + --publish-address string The URL to publish the cluster to. (default "https://api.obol.tech/v1") + --publish-timeout duration Timeout for publishing a cluster, consider increasing if the cluster contains more than 200 validators. (default 30s) + --shutdown-delay duration Graceful shutdown delay. (default 1s) + --timeout duration Timeout for the DKG process, should be increased if DKG times out. (default 1m0s) +``` + +## The `run` command + +### Run the Charon middleware + +This `run` command accepts a `cluster-lock.json` file that was created either via a `charon create cluster` command or `charon dkg`. This lock file outlines the nodes in the cluster and the distributed validators they operate on behalf of. + +```markdown +charon run --help +Starts the long-running Charon middleware process to perform distributed validator duties. + +Usage: + charon run [flags] + +Flags: + --beacon-node-endpoints strings Comma separated list of one or more beacon node endpoint URLs. + --beacon-node-submit-timeout duration Timeout for the submission-related HTTP requests Charon makes to the configured beacon nodes. (default 2s) + --beacon-node-timeout duration Timeout for the HTTP requests Charon makes to the configured beacon nodes. (default 2s) + --builder-api Enables the builder api. Will only produce builder blocks. Builder API must also be enabled on the validator client. Beacon node must be connected to a builder-relay to access the builder network. + --debug-address string Listening address (ip and port) for the pprof and QBFT debug API. It is not enabled by default. 
+ --feature-set string Minimum feature set to enable by default: alpha, beta, or stable. Warning: modify at own risk. (default "stable") + --feature-set-disable strings Comma-separated list of features to disable, overriding the default minimum feature set. + --feature-set-enable strings Comma-separated list of features to enable, overriding the default minimum feature set. + -h, --help Help for run + --jaeger-address string Listening address for jaeger tracing. + --jaeger-service string Service name used for jaeger tracing. (default "charon") + --lock-file string The path to the cluster lock file defining the distributed validator cluster. If both cluster manifest and cluster lock files are provided, the cluster manifest file takes precedence. (default ".charon/cluster-lock.json") + --log-color string Log color; auto, force, disable. (default "auto") + --log-format string Log format; console, logfmt or json (default "console") + --log-level string Log level; debug, info, warn or error (default "info") + --log-output-path string Path in which to write on-disk logs. + --loki-addresses strings Enables sending of logfmt structured logs to these Loki log aggregation server addresses. This is in addition to normal stderr logs. + --loki-service string Service label sent with logs to Loki. (default "charon") + --manifest-file string The path to the cluster manifest file. If both cluster manifest and cluster lock files are provided, the cluster manifest file takes precedence. (default ".charon/cluster-manifest.pb") + --monitoring-address string Listening address (ip and port) for the monitoring API (prometheus). (default "127.0.0.1:3620") + --no-verify Disables cluster definition and lock file verification. + --p2p-disable-reuseport Disables TCP port reuse for outgoing libp2p connections. + --p2p-external-hostname string The DNS hostname advertised by libp2p. This may be used to advertise an external DNS. + --p2p-external-ip string The IP address advertised by libp2p. 
This may be used to advertise an external IP. + --p2p-relays strings Comma-separated list of libp2p relay URLs or multiaddrs. (default [https://0.relay.obol.tech,https://2.relay.obol.dev,https://1.relay.obol.tech]) + --p2p-tcp-address strings Comma-separated list of listening TCP addresses (ip and port) for libP2P traffic. Empty default doesn't bind to local port therefore only supports outgoing connections. + --private-key-file string The path to the charon enr private key file. (default ".charon/charon-enr-private-key") + --private-key-file-lock Enables private key locking to prevent multiple instances using the same key. + --proc-directory string Directory to look into in order to detect other stack components running on the host. + --simnet-beacon-mock Enables an internal mock beacon node for running a simnet. + --simnet-beacon-mock-fuzz Configures simnet beaconmock to return fuzzed responses. + --simnet-slot-duration duration Configures slot duration in simnet beacon mock. (default 1s) + --simnet-validator-keys-dir string The directory containing the simnet validator key shares. (default ".charon/validator_keys") + --simnet-validator-mock Enables an internal mock validator client when running a simnet. Requires simnet-beacon-mock. + --synthetic-block-proposals Enables additional synthetic block proposal duties. Used for testing of rare duties. + --testnet-capella-hard-fork string Capella hard fork version of the custom test network. + --testnet-chain-id uint Chain ID of the custom test network. + --testnet-fork-version string Genesis fork version in hex of the custom test network. + --testnet-genesis-timestamp int Genesis timestamp of the custom test network. + --testnet-name string Name of the custom test network. + --validator-api-address string Listening address (ip and port) for validator-facing traffic proxying the beacon-node API. 
 (default "127.0.0.1:3600") +``` + +## The `exit` command + +A running Charon client will [aggregate and broadcast](../../run/running/quickstart-exit.mdx) signed exit messages it receives from its validator client immediately. These `exit` commands are instead used to *pre-sign* exit messages for an active distributed validator, to save to disk, or to broadcast; once enough of the operators of the cluster have submitted their partial exit signatures. Fully signed exit messages give a user or protocol a guarantee that they can exit an active validator at any point in future without the further assistance of the cluster's operators. In future, [execution-layer initiated exits](https://eips.ethereum.org/EIPS/eip-7002) will provide an even stronger guarantee that a validator can be exited by the withdrawal address it belongs to. + +```markdown +charon exit --help +Sign and broadcast distributed validator exit messages using a remote API. + +Usage: + charon exit [command] + +Available Commands: + active-validator-list List all active validators + broadcast Submit partial exit message for a distributed validator + fetch Fetch a signed exit message from the remote API + sign Sign partial exit message for a distributed validator + +Flags: + -h, --help Help for exit + +Use "charon exit [command] --help" for more information about a command. +``` + +### Pre-sign exit messages for active validators + +:::warning +This command requires Charon to access the distributed validator's private keys, please use caution and keep these private keys securely backed up and secret. + +The default `publish-address` for this command sends signed exit messages to Obol's [API](/api) for aggregation and distribution. Exit signatures are stored in line with Obol's [terms and conditions](https://obol.tech/terms.pdf). +::: + +This command submits partial exit signatures to the remote API for aggregation. 
The required flags are `--beacon-node-endpoints` and `--validator-public-key` of the validator you wish to exit. An exit message can only be signed for a validator that is fully deposited and assigned a validator index. + +```markdown +charon exit sign --help +Sign a partial exit message for a distributed validator and submit it to a remote API for aggregation. + +Usage: + charon exit sign [flags] + +Flags: + --all Exit all currently active validators in the cluster. + --beacon-node-endpoints strings Comma separated list of one or more beacon node endpoint URLs. [REQUIRED] + --beacon-node-timeout duration Timeout for beacon node HTTP calls. (default 30s) + --exit-epoch uint Exit epoch at which the validator will exit, must be the same across all the partial exits. (default 162304) + -h, --help Help for sign + --lock-file string The path to the cluster lock file defining the distributed validator cluster. (default ".charon/cluster-lock.json") + --log-color string Log color; auto, force, disable. (default "auto") + --log-format string Log format; console, logfmt or json (default "console") + --log-level string Log level; debug, info, warn or error (default "info") + --log-output-path string Path in which to write on-disk logs. + --private-key-file string The path to the charon enr private key file. (default ".charon/charon-enr-private-key") + --publish-address string The URL of the remote API. (default "https://api.obol.tech/v1") + --publish-timeout duration Timeout for publishing a signed exit to the publish-address API. (default 5m0s) + --testnet-capella-hard-fork string Capella hard fork version of the custom test network. + --testnet-chain-id uint Chain ID of the custom test network. + --testnet-fork-version string Genesis fork version of the custom test network (in hex). + --testnet-genesis-timestamp int Genesis timestamp of the custom test network. + --testnet-name string Name of the custom test network. 
+ --validator-index uint Validator index of the validator to exit, the associated public key must be present in the cluster lock manifest. If --validator-public-key is also provided, validator existence won't be checked on the beacon chain. + --validator-keys-dir string Path to the directory containing the validator private key share files and passwords. (default ".charon/validator_keys") + --validator-public-key string Public key of the validator to exit, must be present in the cluster lock manifest. If --validator-index is also provided, validator liveliness won't be checked on the beacon chain. +``` + +### Download fully signed exit messages for cold storage + +Once enough operators have submitted their partial signatures for an active validator, you can use the `charon exit fetch` command to download the complete exit message to a file for safe keeping. This file can be given to a delegator who wants a guarantee that they can exit the distributed validator if need be. + +```markdown +charon exit fetch --help +Fetches a fully signed exit message for a given validator from the remote API and writes it to disk. + +Usage: + charon exit fetch [flags] + +Flags: + --all Exit all currently active validators in the cluster. + --fetched-exit-path string Path to store fetched signed exit messages. (default "./") + -h, --help Help for fetch + --lock-file string The path to the cluster lock file defining the distributed validator cluster. (default ".charon/cluster-lock.json") + --log-color string Log color; auto, force, disable. (default "auto") + --log-format string Log format; console, logfmt or json (default "console") + --log-level string Log level; debug, info, warn or error (default "info") + --log-output-path string Path in which to write on-disk logs. + --private-key-file string The path to the charon enr private key file. (default ".charon/charon-enr-private-key") + --publish-address string The URL of the remote API. 
(default "https://api.obol.tech/v1") + --publish-timeout duration Timeout for publishing a signed exit to the publish-address API. (default 5m0s) + --testnet-capella-hard-fork string Capella hard fork version of the custom test network. + --testnet-chain-id uint Chain ID of the custom test network. + --testnet-fork-version string Genesis fork version of the custom test network (in hex). + --testnet-genesis-timestamp int Genesis timestamp of the custom test network. + --testnet-name string Name of the custom test network. + --validator-public-key string Public key of the validator to exit, must be present in the cluster lock manifest. If --validator-index is also provided, validator liveliness won't be checked on the beacon chain. +``` + +### Broadcast a signed exit message + +The `charon exit broadcast` subcommand can be used to broadcast either a signed exit message from a file that was downloaded via the `fetch` command, or it can retrieve and broadcast an exit message directly from the API. + +```markdown +charon exit broadcast --help +Retrieves and broadcasts to the configured beacon node a fully signed validator exit message, aggregated with the available partial signatures retrieved from the publish-address. Can also read a signed exit message from disk, in order to be broadcasted to the configured beacon node. + +Usage: + charon exit broadcast [flags] + +Flags: + --all Exit all currently active validators in the cluster. + --beacon-node-endpoints strings Comma separated list of one or more beacon node endpoint URLs. [REQUIRED] + --beacon-node-timeout duration Timeout for beacon node HTTP calls. (default 30s) + --exit-epoch uint Exit epoch at which the validator will exit, must be the same across all the partial exits. (default 162304) + --exit-from-dir string Retrieves a signed exit messages from a pre-prepared files in a directory instead of --publish-address. 
+ --exit-from-file string Retrieves a signed exit message from a pre-prepared file instead of --publish-address. + -h, --help Help for broadcast + --lock-file string The path to the cluster lock file defining the distributed validator cluster. (default ".charon/cluster-lock.json") + --log-color string Log color; auto, force, disable. (default "auto") + --log-format string Log format; console, logfmt or json (default "console") + --log-level string Log level; debug, info, warn or error (default "info") + --log-output-path string Path in which to write on-disk logs. + --private-key-file string The path to the charon enr private key file. (default ".charon/charon-enr-private-key") + --publish-address string The URL of the remote API. (default "https://api.obol.tech/v1") + --publish-timeout duration Timeout for publishing a signed exit to the publish-address API. (default 5m0s) + --testnet-capella-hard-fork string Capella hard fork version of the custom test network. + --testnet-chain-id uint Chain ID of the custom test network. + --testnet-fork-version string Genesis fork version of the custom test network (in hex). + --testnet-genesis-timestamp int Genesis timestamp of the custom test network. + --testnet-name string Name of the custom test network. + --validator-keys-dir string Path to the directory containing the validator private key share files and passwords. (default ".charon/validator_keys") + --validator-public-key string Public key of the validator to exit, must be present in the cluster lock manifest. If --validator-index is also provided, validator liveliness won't be checked on the beacon chain. +``` + +## The `combine` command + +### Combine distributed validator key shares into a single validator key + +The `combine` command combines many validator key shares into a single Ethereum validator key. 
+ +:::warning +This command requires Charon to access the distributed validator's private keys, please use caution and keep these private keys securely backed up and secret. +::: + +```markdown +charon combine --help +Combines the private key shares from a threshold of operators in a distributed validator cluster into a set of validator private keys that can be imported into a standard Ethereum validator client. + +Warning: running the resulting private keys in a validator alongside the original distributed validator cluster *will* result in slashing. + +Usage: + charon combine [flags] + +Flags: + --cluster-dir string Parent directory containing a number of .charon subdirectories from the required threshold of nodes in the cluster. (default ".charon/cluster") + --force Overwrites private keys with the same name if present. + -h, --help Help for combine + --no-verify Disables cluster definition and lock file verification. + --output-dir string Directory to output the combined private keys to. (default "./validator_keys") + --testnet-chain-id uint Chain ID of the custom test network. + --testnet-fork-version string Genesis fork version of the custom test network (in hex). + --testnet-genesis-timestamp int Genesis timestamp of the custom test network. + --testnet-name string Name of the custom test network. 
+``` + +To run this command, one needs at least a threshold number of node operator's `.charon` directories, which need to be organized into a single folder: + +```shell +tree ./cluster +cluster/ +├── node0 +│   ├── charon-enr-private-key +│   ├── cluster-lock.json +│   ├── deposit-data.json +│   └── validator_keys +│   ├── keystore-0.json +│   ├── keystore-0.txt +│   ├── keystore-1.json +│   └── keystore-1.txt +├── node1 +│   ├── charon-enr-private-key +│   ├── cluster-lock.json +│   ├── deposit-data.json +│   └── validator_keys +│   ├── keystore-0.json +│   ├── keystore-0.txt +│   ├── keystore-1.json +│   └── keystore-1.txt +├── node2 +│   ├── charon-enr-private-key +│   ├── cluster-lock.json +│   ├── deposit-data.json +│   └── validator_keys +│   ├── keystore-0.json +│   ├── keystore-0.txt +│   ├── keystore-1.json +│   └── keystore-1.txt +└── node3 + ├── charon-enr-private-key + ├── cluster-lock.json + ├── deposit-data.json + └── validator_keys + ├── keystore-0.json + ├── keystore-0.txt + ├── keystore-1.json + └── keystore-1.txt +``` + +That is, each operator `.charon` directory must be placed in a parent directory, and renamed to avoid conflicts. + +If for example the lock file defines 2 validators, each `validator_keys` directory must contain exactly 4 files, a JSON and TXT file for each validator. + +Those files must be named with an increasing index associated with the validator in the lock file, starting from 0. + +The chosen folder name does not matter, as long as it's different from `.charon`. + +At the end of the process `combine` will create a new directory specified by `--output-dir` containing the traditional validator private keystore. 
+ +```shell +charon combine --cluster-dir="./cluster" --output-dir="./combined" +tree ./combined +combined +├── keystore-0.json +├── keystore-0.txt +├── keystore-1.json +└── keystore-1.txt +``` + +By default, the `combine` command will refuse to overwrite any private key that is already present in the destination directory. + +To force the process, use the `--force` flag. + +:::danger + +The generated private keys are in the standard [EIP-2335](https://github.com/ethereum/ercs/blob/master/ERCS/erc-2335.md) format, and can be imported in any Ethereum validator client that supports it. + +**Ensure your distributed validator cluster is completely shut down for at least two epochs before starting a replacement validator or you are likely to be slashed.** +::: + +## Host a relay + +Relays run a libp2p [circuit relay](https://docs.libp2p.io/concepts/nat/circuit-relay/) server that allows Charon clusters to perform peer discovery and for Charon clients behind strict NAT gateways to be communicated with. If you want to self-host a relay for your cluster(s) the following command will start one. + +```markdown +charon relay --help +Starts a libp2p circuit relay that charon clients can use to discover and connect to their peers. + +Usage: + charon relay [flags] + +Flags: + --auto-p2pkey Automatically create a p2pkey (secp256k1 private key used for p2p authentication and ENR) if none found in data directory. (default true) + --data-dir string The directory where charon will store all its internal data. (default ".charon") + --debug-address string Listening address (ip and port) for the pprof and QBFT debug API. It is not enabled by default. + -h, --help Help for relay + --http-address string Listening address (ip and port) for the relay http server serving runtime ENR. (default "127.0.0.1:3640") + --log-color string Log color; auto, force, disable. 
(default "auto") + --log-format string Log format; console, logfmt or json (default "console") + --log-level string Log level; debug, info, warn or error (default "info") + --log-output-path string Path in which to write on-disk logs. + --loki-addresses strings Enables sending of logfmt structured logs to these Loki log aggregation server addresses. This is in addition to normal stderr logs. + --loki-service string Service label sent with logs to Loki. (default "charon") + --monitoring-address string Listening address (ip and port) for the monitoring API (prometheus). + --p2p-advertise-private-addresses Enable advertising of libp2p auto-detected private addresses. This doesn't affect manually provided p2p-external-ip/hostname. + --p2p-disable-reuseport Disables TCP port reuse for outgoing libp2p connections. + --p2p-external-hostname string The DNS hostname advertised by libp2p. This may be used to advertise an external DNS. + --p2p-external-ip string The IP address advertised by libp2p. This may be used to advertise an external IP. + --p2p-max-connections int Libp2p maximum number of peers that can connect to this relay. (default 16384) + --p2p-max-reservations int Updates max circuit reservations per peer (each valid for 30min) (default 512) + --p2p-relay-loglevel string Libp2p circuit relay log level. E.g., debug, info, warn, error. + --p2p-relays strings Comma-separated list of libp2p relay URLs or multiaddrs. (default [https://0.relay.obol.tech,https://2.relay.obol.dev,https://1.relay.obol.tech]) + --p2p-tcp-address strings Comma-separated list of listening TCP addresses (ip and port) for libP2P traffic. Empty default doesn't bind to local port therefore only supports outgoing connections. +``` + +You can also consider adding [alternative public relays](../../adv/security/risks.md) to your cluster by specifying a list of `p2p-relays` in [`charon run`](#run-the-charon-middleware). 
+ +## Experimental commands + +These commands are subject to breaking changes until they are moved outside of the `alpha` subcommand in a future release. + +### Test your candidate distributed validator cluster + +Charon comes with a test suite for understanding the suitability and readiness of a given setup. + +```markdown +charon alpha test --help +Test subcommands provide test suite to evaluate current cluster setup. The full validator stack can be tested - charon peers, consensus layer, validator client, MEV. Current machine's infra can be examined as well. + +Usage: + charon alpha test [command] + +Available Commands: + all Run tests towards peer nodes, beacon nodes, validator client, MEV relays, own hardware and internet connectivity. + beacon Run multiple tests towards beacon nodes + infra Run multiple hardware and internet connectivity tests + mev Run multiple tests towards MEV relays + peers Run multiple tests towards peer nodes + validator Run multiple tests towards validator client + +Flags: + -h, --help Help for test + +Use "charon alpha test [command] --help" for more information about a command. +``` + +#### Test all + +```markdown +charon alpha test all --help +Run tests towards peer nodes, beacon nodes, validator client, MEV relays, own hardware and internet connectivity. Verify that Charon can efficiently do its duties on the tested setup. + +Usage: + charon alpha test all [flags] + +Flags: + --beacon-endpoints strings [REQUIRED] Comma separated list of one or more beacon node endpoint URLs. + --beacon-load-test Enable load test, not advisable when testing towards external beacon nodes. + --beacon-load-test-duration duration Time to keep running the load tests in seconds. For each second a new continuous ping instance is spawned. (default 5s) + --beacon-simulation-custom int Run custom simulation with the specified amount of validators. + --beacon-simulation-duration-in-slots int Time to keep running the simulation in slots. 
(default 32) + --beacon-simulation-file-dir string Time to keep running the simulation in slots. (default "./") + --beacon-simulation-verbose Show results for each request and each validator. + -h, --help Help for all + --infra-disk-io-block-size-kb int The block size in kilobytes used for I/O units. Same value applies for both reads and writes. (default 4096) + --infra-disk-io-test-file-dir string Directory at which disk performance will be measured. If none specified, current user's home directory will be used. + --infra-internet-test-servers-exclude strings List of server names to be excluded from the tests. To be specified only if you experience issues with a server that is wrongly considered best performing. + --infra-internet-test-servers-only strings List of specific server names to be included for the internet tests, the best performing one is chosen. If not provided, closest and best performing servers are chosen automatically. + --log-color string Log color; auto, force, disable. (default "auto") + --log-format string Log format; console, logfmt or json (default "console") + --log-level string Log level; debug, info, warn or error (default "info") + --log-output-path string Path in which to write on-disk logs. + --mev-beacon-node-endpoint string [REQUIRED] Beacon node endpoint URL used for block creation test. + --mev-endpoints strings Comma separated list of one or more MEV relay endpoint URLs. + --mev-load-test Enable load test. + --mev-number-of-payloads uint Increases the accuracy of the load test by asking for multiple payloads. Increases test duration. (default 1) + --output-json string File path to which output can be written in JSON format. + --p2p-disable-reuseport Disables TCP port reuse for outgoing libp2p connections. + --p2p-external-hostname string The DNS hostname advertised by libp2p. This may be used to advertise an external DNS. + --p2p-external-ip string The IP address advertised by libp2p. This may be used to advertise an external IP. 
+ --p2p-relays strings Comma-separated list of libp2p relay URLs or multiaddrs. (default [https://0.relay.obol.tech,https://2.relay.obol.dev,https://1.relay.obol.tech]) + --p2p-tcp-address strings Comma-separated list of listening TCP addresses (ip and port) for libP2P traffic. Empty default doesn't bind to local port therefore only supports outgoing connections. + --peers-definition-file string The path to the cluster definition file or an HTTP URL. + --peers-direct-connection-timeout duration Time to keep trying to establish direct connection to peer. (default 2m0s) + --peers-enrs strings [REQUIRED] Comma-separated list of each peer ENR address. + --peers-keep-alive duration Time to keep TCP node alive after test completion, so connection is open for other peers to test on their end. (default 30m0s) + --peers-load-test-duration duration Time to keep running the load tests in seconds. For each second a new continuous ping instance is spawned. (default 30s) + --peers-lock-file string The path to the cluster lock file defining the distributed validator cluster. + --peers-private-key-file string The path to the charon enr private key file. (default ".charon/charon-enr-private-key") + --quiet Do not print test results to stdout. + --test-cases strings List of comma separated names of tests to be exeucted. Available tests are: [DirectConn Ping PingMeasure PingLoad Libp2pTCPPortOpen PingMeasureRelay PingRelay Ping PingLoad Simulate500 Simulate1000 SimulateCustom PingMeasure Version Synced PeerCount Simulate1 Simulate10 Simulate100 Ping PingMeasure PingLoad Ping PingMeasure CreateBlock DiskWriteSpeed DiskReadSpeed AvailableMemory TotalMemory InternetLatency InternetDownloadSpeed InternetUploadSpeed DiskWriteIOPS DiskReadIOPS] + --timeout duration Execution timeout for all tests. (default 1h0m0s) + --validator-load-test-duration duration Time to keep running the load tests in seconds. For each second a new continuous ping instance is spawned. 
(default 5s) + --validator-validator-api-address string Listening address (ip and port) for validator-facing traffic proxying the beacon-node API. (default "127.0.0.1:3600") +``` + +#### Test beacon node + +```markdown +charon alpha test beacon --help +Run multiple tests towards beacon nodes. Verify that Charon can efficiently interact with Beacon Node(s). + +Usage: + charon alpha test beacon [flags] + +Flags: + --endpoints strings [REQUIRED] Comma separated list of one or more beacon node endpoint URLs. + -h, --help Help for beacon + --load-test Enable load test, not advisable when testing towards external beacon nodes. + --load-test-duration duration Time to keep running the load tests in seconds. For each second a new continuous ping instance is spawned. (default 5s) + --output-json string File path to which output can be written in JSON format. + --quiet Do not print test results to stdout. + --simulation-custom int Run custom simulation with the specified amount of validators. + --simulation-duration-in-slots int Time to keep running the simulation in slots. (default 32) + --simulation-file-dir string Time to keep running the simulation in slots. (default "./") + --simulation-verbose Show results for each request and each validator. + --test-cases strings List of comma separated names of tests to be exeucted. Available tests are: [Ping Version PingLoad Simulate100 Simulate500 PingMeasure Synced PeerCount Simulate1 Simulate10 Simulate1000 SimulateCustom] + --timeout duration Execution timeout for all tests. (default 1h0m0s) +``` + +#### Test infra + +```markdown +charon alpha test infra --help +Run multiple hardware and internet connectivity tests. Verify that Charon is running on host with sufficient capabilities. + +Usage: + charon alpha test infra [flags] + +Flags: + --disk-io-block-size-kb int The block size in kilobytes used for I/O units. Same value applies for both reads and writes. 
(default 4096) + --disk-io-test-file-dir string Directory at which disk performance will be measured. If none specified, current user's home directory will be used. + -h, --help Help for infra + --internet-test-servers-exclude strings List of server names to be excluded from the tests. To be specified only if you experience issues with a server that is wrongly considered best performing. + --internet-test-servers-only strings List of specific server names to be included for the internet tests, the best performing one is chosen. If not provided, closest and best performing servers are chosen automatically. + --output-json string File path to which output can be written in JSON format. + --quiet Do not print test results to stdout. + --test-cases strings List of comma separated names of tests to be exeucted. Available tests are: [DiskReadIOPS InternetLatency InternetDownloadSpeed DiskWriteSpeed DiskWriteIOPS DiskReadSpeed AvailableMemory TotalMemory InternetUploadSpeed] + --timeout duration Execution timeout for all tests. (default 1h0m0s) +``` + +#### Test MEV + +```markdown +charon alpha test mev --help +Run multiple tests towards MEV relays. Verify that Charon can efficiently interact with MEV relay(s). + +Usage: + charon alpha test mev [flags] + +Flags: + --beacon-node-endpoint string [REQUIRED] Beacon node endpoint URL used for block creation test. + --endpoints strings Comma separated list of one or more MEV relay endpoint URLs. + -h, --help Help for mev + --load-test Enable load test. + --number-of-payloads uint Increases the accuracy of the load test by asking for multiple payloads. Increases test duration. (default 1) + --output-json string File path to which output can be written in JSON format. + --quiet Do not print test results to stdout. + --test-cases strings List of comma separated names of tests to be exeucted. Available tests are: [Ping PingMeasure CreateBlock] + --timeout duration Execution timeout for all tests. 
(default 1h0m0s) +``` + +#### Test Charon peers + +```markdown +charon alpha test peers --help +Run multiple tests towards peer nodes. Verify that Charon can efficiently interact with Validator Client. + +Usage: + charon alpha test peers [flags] + +Flags: + --definition-file string The path to the cluster definition file or an HTTP URL. + --direct-connection-timeout duration Time to keep trying to establish direct connection to peer. (default 2m0s) + --enrs strings [REQUIRED] Comma-separated list of each peer ENR address. + -h, --help Help for peers + --keep-alive duration Time to keep TCP node alive after test completion, so connection is open for other peers to test on their end. (default 30m0s) + --load-test-duration duration Time to keep running the load tests in seconds. For each second a new continuous ping instance is spawned. (default 30s) + --lock-file string The path to the cluster lock file defining the distributed validator cluster. + --log-color string Log color; auto, force, disable. (default "auto") + --log-format string Log format; console, logfmt or json (default "console") + --log-level string Log level; debug, info, warn or error (default "info") + --log-output-path string Path in which to write on-disk logs. + --output-json string File path to which output can be written in JSON format. + --p2p-disable-reuseport Disables TCP port reuse for outgoing libp2p connections. + --p2p-external-hostname string The DNS hostname advertised by libp2p. This may be used to advertise an external DNS. + --p2p-external-ip string The IP address advertised by libp2p. This may be used to advertise an external IP. + --p2p-relays strings Comma-separated list of libp2p relay URLs or multiaddrs. (default [https://0.relay.obol.tech,https://2.relay.obol.dev,https://1.relay.obol.tech]) + --p2p-tcp-address strings Comma-separated list of listening TCP addresses (ip and port) for libP2P traffic. 
Empty default doesn't bind to local port therefore only supports outgoing connections. + --private-key-file string The path to the charon enr private key file. (default ".charon/charon-enr-private-key") + --quiet Do not print test results to stdout. + --test-cases strings List of comma separated names of tests to be exeucted. Available tests are: [PingMeasure PingLoad DirectConn Ping Libp2pTCPPortOpen] + --timeout duration Execution timeout for all tests. (default 1h0m0s) +``` + +#### Test validator client + +```markdown +charon alpha test validator --help +Run multiple tests towards validator client. Verify that Charon can efficiently interact with its validator client. + +Usage: + charon alpha test validator [flags] + +Flags: + -h, --help Help for validator + --load-test-duration duration Time to keep running the load tests in seconds. For each second a new continuous ping instance is spawned. (default 5s) + --output-json string File path to which output can be written in JSON format. + --quiet Do not print test results to stdout. + --test-cases strings List of comma separated names of tests to be exeucted. Available tests are: [Ping PingMeasure PingLoad] + --timeout duration Execution timeout for all tests. (default 1h0m0s) + --validator-api-address string Listening address (ip and port) for validator-facing traffic proxying the beacon-node API. (default "127.0.0.1:3600") +``` diff --git a/versioned_docs/version-v1.2.0/learn/charon/cluster-configuration.md b/versioned_docs/version-v1.2.0/learn/charon/cluster-configuration.md new file mode 100644 index 0000000000..efe8693ffd --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/charon/cluster-configuration.md @@ -0,0 +1,163 @@ +--- +description: Documenting a Distributed Validator Cluster in a standardised file format +sidebar_position: 3 +--- + +# Cluster Configuration + +:::warning +These cluster definition and cluster lock files are a work in progress. 
The intention is for the files to be standardised for operating distributed validators via the [EIP process](https://eips.ethereum.org/) when appropriate. +::: + +This document describes the configuration options for running a Charon client or cluster. + +A Charon cluster is configured in two steps: + +- `cluster-definition.json` which defines the intended cluster configuration before keys have been created in a distributed key generation ceremony. +- `cluster-lock.json` which includes and extends `cluster-definition.json` with distributed validator BLS public key shares. + +In the case of a solo operator running a cluster, the [`charon create cluster`](./charon-cli-reference.md#create-a-full-cluster-locally) command combines both steps into one and just outputs the final `cluster-lock.json` without a DKG step. + +## Cluster Definition File + +The `cluster-definition.json` is provided as input to the DKG which generates keys and the `cluster-lock.json` file. + +### Using the CLI + +The [`charon create dkg`](./charon-cli-reference.md#creating-the-configuration-for-a-dkg-ceremony) command is used to create the `cluster-definition.json` file which is used as input to `charon dkg`. + +The schema of the `cluster-definition.json` is defined as: + +```json +{ + "name": "best cluster", // Optional cosmetic identifier + "creator": { + "address": "0x123..abfc", //ETH1 address of the creator + "config_signature": "0x123654...abcedf" // EIP712 Signature of config_hash using creator privkey + }, + "operators": [ + { + "address": "0x123..abfc", // ETH1 address of the operator + "enr": "enr://abcdef...12345", // Charon node ENR + "config_signature": "0x123456...abcdef", // EIP712 Signature of config_hash by ETH1 address priv key + "enr_signature": "0x123654...abcedf" // EIP712 Signature of ENR by ETH1 address priv key + } + ], + "uuid": "1234-abcdef-1234-abcdef", // Random unique identifier. 
+ "version": "v1.2.0", // Schema version + "timestamp": "2022-01-01T12:00:00+00:00", // Creation timestamp + "num_validators": 2, // Number of distributed validators to be created in cluster-lock.json + "threshold": 3, // Optional threshold required for signature reconstruction + "validators": [ + { + "fee_recipient_address": "0x123..abfc", // ETH1 fee_recipient address of validator + "withdrawal_address": "0x123..abfc" // ETH1 withdrawal address of validator + }, + { + "fee_recipient_address": "0x123..abfc", // ETH1 fee_recipient address of validator + "withdrawal_address": "0x123..abfc" // ETH1 withdrawal address of validator + } + ], + "dkg_algorithm": "foo_dkg_v1", // Optional DKG algorithm for key generation + "fork_version": "0x00112233", // Chain/Network identifier + "config_hash": "0xabcfde...acbfed", // Hash of the static (non-changing) fields + "definition_hash": "0xabcdef...abcedef" // Final hash of all fields +} +``` + +### Using the DV Launchpad + +- A `leader/creator`, that wishes to coordinate the creation of a new Distributed Validator Cluster navigates to the launchpad and selects "Create new Cluster". +- The `leader/creator` uses the user interface to configure all of the important details about the cluster including: + - The `Withdrawal Address` for the created validators; + - The `Fee Recipient Address` for block proposals if it differs from the withdrawal address; + - The number of distributed validators to create; + - The list of participants in the cluster specified by Ethereum address(/ENS); + - The threshold of fault tolerance required. +- These key pieces of information form the basis of the cluster configuration. These fields (and some technical fields like DKG algorithm to use) are serialized and merklized to produce the definition's `cluster_definition_hash`. This merkle root will be used to confirm that there is no ambiguity or deviation between definitions when they are provided to Charon nodes. 
+- Once the `leader/creator` is satisfied with the configuration they publish it to the launchpad's data availability layer for the other participants to access. (For early development the launchpad will use a centralized backend db to store the cluster configuration. Near production, solutions like IPFS or arweave may be more suitable for the long term decentralization of the launchpad.)
+
+## Cluster Lock File
+
+The `cluster-lock.json` has the following schema:
+
+```json
+{
+  "cluster_definition": {...}, // Cluster definition json, identical schema to above,
+  "distributed_validators": [ // Length equal to cluster_definition.num_validators.
+    {
+      "distributed_public_key": "0x123..abfc", // DV root pubkey
+      "public_shares": [ "abc...fed", "cfd...bfe"], // Length equal to cluster_definition.operators
+      "fee_recipient": "0x123..abfc" // Defaults to withdrawal address if not set, can be edited manually
+    }
+  ],
+  "lock_hash": "abcdef...abcedef", // Config_hash plus distributed_validators
+  "signature_aggregate": "abcdef...abcedef" // BLS aggregate signature of the lock hash signed by each DV pubkey.
+}
+```
+
+## Cluster Size and Resilience
+
+The cluster size (the number of nodes/operators in the cluster) determines the resilience of the cluster; its ability to remain operational under diverse failure scenarios.
+Larger clusters can tolerate more faulty nodes.
+However, increased cluster size implies higher operational costs and potential network latency, which may negatively affect performance.
+
+Optimal cluster size is therefore a trade-off between resilience (larger is better) vs cost-efficiency and performance (smaller is better).
+
+Cluster resilience can be broadly classified into two categories:
+
+- **[Byzantine Fault Tolerance (BFT)](https://en.wikipedia.org/wiki/Byzantine_fault)** - the ability to tolerate nodes that are actively trying to disrupt the cluster.
+- **[Crash Fault Tolerance (CFT)](https://en.wikipedia.org/wiki/Fault_tolerance)** - the ability to tolerate nodes that have crashed or are otherwise unavailable. + +Different cluster sizes tolerate different counts of byzantine vs crash nodes. +In practice, hardware and software crash relatively frequently, while byzantine behaviour is relatively uncommon. +However, Byzantine Fault Tolerance is crucial for trust minimised systems like distributed validators. +Thus, cluster size can be chosen to optimise for either BFT or CFT. + +The table below lists different cluster sizes and their characteristics: + +- `Cluster Size` - the number of nodes in the cluster. +- `Threshold` - the minimum number of nodes that must collaborate to reach consensus quorum and to create signatures. +- `BFT #` - the maximum number of byzantine nodes that can be tolerated. +- `CFT #` - the maximum number of crashed nodes that can be tolerated. + +| Cluster Size | Threshold | BFT # | CFT # | Note | +|--------------|-----------|-------|-------|------------------------------------| +| 1 | 1 | 0 | 0 | ❌ Invalid: Not CFT nor BFT! | +| 2 | 2 | 0 | 0 | ❌ Invalid: Not CFT nor BFT! | +| 3 | 2 | 0 | 1 | ⚠️ Warning: CFT but not BFT! 
| +| 4 | 3 | 1 | 1 | ✅ CFT and BFT optimal for 1 faulty | +| 5 | 4 | 1 | 1 | | +| 6 | 4 | 1 | 2 | ✅ CFT optimal for 2 crashed | +| 7 | 5 | 2 | 2 | ✅ BFT optimal for 2 byzantine | +| 8 | 6 | 2 | 2 | | +| 9 | 6 | 2 | 3 | ✅ CFT optimal for 3 crashed | +| 10 | 7 | 3 | 3 | ✅ BFT optimal for 3 byzantine | +| 11 | 8 | 3 | 3 | | +| 12 | 8 | 3 | 4 | ✅ CFT optimal for 4 crashed | +| 13 | 9 | 4 | 4 | ✅ BFT optimal for 4 byzantine | +| 14 | 10 | 4 | 4 | | +| 15 | 10 | 4 | 5 | ✅ CFT optimal for 5 crashed | +| 16 | 11 | 5 | 5 | ✅ BFT optimal for 5 byzantine | +| 17 | 12 | 5 | 5 | | +| 18 | 12 | 5 | 6 | ✅ CFT optimal for 6 crashed | +| 19 | 13 | 6 | 6 | ✅ BFT optimal for 6 byzantine | +| 20 | 14 | 6 | 6 | | +| 21 | 14 | 6 | 7 | ✅ CFT optimal for 7 crashed | +| 22 | 15 | 7 | 7 | ✅ BFT optimal for 7 byzantine | + +The table above is determined by the QBFT consensus algorithm with the +following formulas from [this](https://arxiv.org/pdf/1909.10194.pdf) paper: + +```shell +n = cluster size + +Threshold: min number of honest nodes required to reach quorum given size n +Quorum(n) = ceiling(2n/3) + +BFT #: max number of faulty (byzantine) nodes given size n +f(n) = floor((n-1)/3) + +CFT #: max number of unavailable (crashed) nodes given size n +crashed(n) = n - Quorum(n) +``` diff --git a/versioned_docs/version-v1.2.0/learn/charon/dkg.md b/versioned_docs/version-v1.2.0/learn/charon/dkg.md new file mode 100644 index 0000000000..322a0016a4 --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/charon/dkg.md @@ -0,0 +1,73 @@ +--- +description: Generating private keys for a Distributed Validator requires a Distributed Key Generation (DKG) Ceremony. +sidebar_position: 2 +--- + +# Distributed Key Generation + +## Overview + +A [**distributed validator key**](../../learn/intro/key-concepts.md#distributed-validator-key) is a group of BLS private keys that together operate as a threshold key for participating in proof-of-stake consensus. 
+ +To make a distributed validator with no fault-tolerance (i.e. all nodes need to be online to sign every message), due to the BLS signature scheme used by Proof of Stake Ethereum, each key share could be chosen by operators independently. However, to create a distributed validator that can stay online despite a subset of its nodes going offline, the key shares need to be generated together (4 randomly chosen points on a graph don't all necessarily sit on the same order three curve). To do this in a secure manner with no one party being trusted to distribute the keys requires what is known as a [**distributed key generation ceremony**](../../learn/intro/key-concepts.md#distributed-validator-key-generation-ceremony). + +The Charon client has the responsibility of securely completing a distributed key generation ceremony with its counterparty nodes. The ceremony configuration is outlined in a [cluster definition](../../learn/charon/cluster-configuration.md). + +## Actors Involved + +A distributed key generation ceremony involves `Operators` and their `Charon clients`. + +- An `Operator` is identified by their Ethereum address. They will sign a message with this address to authorize their Charon client to take part in the DKG ceremony. + +- A `Charon client` is also identified by a public/private key pair, in this instance, the public key is represented as an [Ethereum Node Record](https://eips.ethereum.org/EIPS/eip-778) (ENR). This is a standard identity format for both EL and CL clients. These ENRs are used by each Charon node to identify its cluster peers over the internet, and to communicate with one another in an [end to end encrypted manner](https://github.com/libp2p/go-libp2p/tree/master/p2p/security/noise). These keys need to be created (and backed up) by each operator before they can participate in a cluster creation. 
+
+## Cluster Definition Creation
+
+This cluster definition specifies the intended cluster configuration before keys have been created in a distributed key generation ceremony. The `cluster-definition.json` file can be created with the help of the [Distributed Validator Launchpad](./cluster-configuration.md#using-the-dv-launchpad) or via the [CLI](./cluster-configuration.md#using-the-cli).
+
+## Carrying out the DKG ceremony
+
+Once all participants have signed the cluster definition, they can load the `cluster-definition` file into their Charon client, and the client will attempt to complete the DKG.
+
+Charon will read the ENRs in the definition, confirm that its ENR is present, and then will reach out to relays that are deployed to find the other ENRs on the network. (Fresh ENRs just have a public key and an IP address of 0.0.0.0 until they are loaded into a live Charon client, which will update the IP address and increment the ENR's nonce and re-sign it with the client's private key. If an ENR with a higher nonce is seen by a Charon client, they will update the IP address of that ENR in their address book.)
+
+Once all clients in the cluster can establish a connection with one another and they each complete a handshake (confirm everyone has a matching `cluster_definition_hash`), the ceremony begins.
+
+No user input is required; Charon does the work and outputs the following files to each machine and then exits.
+
+## Backing up the ceremony artifacts
+
+At the end of a DKG ceremony, each operator will have a number of files outputted by their Charon client based on how many distributed validators the group chose to generate together.
+
+These files are:
+
+- **Validator keystore(s):** These files will be loaded into the operator's validator client and each file represents one share of a Distributed Validator.
+- **A distributed validator cluster lock file:** This `cluster-lock.json` file contains the configuration a distributed validator client like Charon needs to join a cluster capable of operating a number of distributed validators. +- **Validator deposit data:** This file is used to activate one or more distributed validators on the Ethereum network. + +Once the ceremony is complete, all participants should take a backup of the created files. In future versions of Charon, if a participant loses access to these key shares, it will be possible to use a key re-sharing protocol to swap the participants old keys out of a distributed validator in favor of new keys, allowing the rest of a cluster to recover from a set of lost key shares. However for now, without a backup, the safest thing to do would be to exit the validator. + +## DKG Verification + +For many use cases of distributed validators, the funder/depositor of the validator may not be the same person as the key creators/node operators, as (outside of the base protocol) stake delegation is a common phenomenon. This handover of information introduces a point of trust. How does someone verify that a proposed validator `deposit data` corresponds to a real, fair, DKG with participants the depositor expects? + +There are a number of aspects to this trust surface that can be mitigated with a "Don't trust, verify" model. Verification for the time being is easier off chain, until things like a [BLS precompile](https://eips.ethereum.org/EIPS/eip-2537) are brought into the EVM, along with cheap ZKP verification on chain. Some of the questions that can be asked of Distributed Validator Key Generation Ceremonies include: + +- Do the public key shares combine together to form the group public key? + - This can be checked on chain as it does not require a pairing operation + - This can give confidence that a BLS pubkey represents a Distributed Validator, but does not say anything about the custody of the keys. (e.g. 
Was the ceremony sybil attacked, did they collude to reconstitute the group private key etc.) +- Do the created BLS public keys attest to their `cluster_definition_hash`? + - This is to create a backwards link between newly created BLS public keys and the operator's eth1 addresses that took part in their creation. + - If a proposed distributed validator BLS group public key can produce a signature of the `cluster_definition_hash`, it can be inferred that at least a threshold of the operators signed this data. + - As the `cluster_definition_hash` is the same for all distributed validators created in the ceremony, the signatures can be aggregated into a group signature that verifies all created group keys at once. This makes it cheaper to verify a number of validators at once on chain. +- Is there either a VSS or PVSS proof of a fair DKG ceremony? + - VSS (Verifiable Secret Sharing) means only operators can verify fairness, as the proof requires knowledge of one of the secrets. + - PVSS (Publicly Verifiable Secret Sharing) means anyone can verify fairness, as the proof is usually a Zero Knowledge Proof. + - A PVSS of a fair DKG would make it more difficult for operators to collude and undermine the security of the Distributed Validator. + - Zero Knowledge Proof verification on chain is currently expensive, but is becoming achievable through the hard work and research of the many ZK based teams in the industry. + +## Appendix + +### Sample Configuration and Lock Files + +Refer to the details [here](../charon/cluster-configuration.md). 
diff --git a/versioned_docs/version-v1.2.0/learn/charon/intro.md b/versioned_docs/version-v1.2.0/learn/charon/intro.md new file mode 100644 index 0000000000..cedf4b913b --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/charon/intro.md @@ -0,0 +1,89 @@ +--- +description: Charon - The Distributed Validator Client +sidebar_position: 1 +--- + +# Introduction to Charon + +This section introduces and outlines the Charon *[kharon]* middleware, Obol's implementation of DVT. Please see the [key concepts](../../learn/intro/key-concepts.md) section as background and context. + +## What is Charon? + +Charon is a GoLang-based, HTTP middleware built by Obol to enable any existing Ethereum validator clients to operate together as part of a distributed validator. + +Charon sits as a middleware between a normal validating client and its connected beacon node, intercepting and proxying API traffic. Multiple Charon clients are configured to communicate together to come to consensus on validator duties and behave as a single unified proof-of-stake validator together. The nodes form a cluster that is *byzantine-fault tolerant* and continues to progress assuming a supermajority of working/honest nodes is met. + +![Charon Cluster](/img/DVCluster.png) + +## Charon Architecture + +Charon is an Ethereum proof of stake distributed validator (DV) client. Like any validator client, its main purpose is to perform validation duties for the Beacon Chain, primarily attestations and block proposals. The beacon client handles a lot of the heavy lifting, leaving the validator client to focus on fetching duty data, signing that data, and submitting it back to the beacon client. + +Charon is designed as a generic event-driven workflow with different components coordinating to perform validation duties. All duties follow the same flow, the only difference being the signed data. 
The workflow can be divided into phases consisting of one or more components: + +![Charon Workflow](/img/workflow.jpg) + +### Determine **when** duties need to be performed + +The beacon chain is divided into [slots](https://eth2book.info/capella/part3/config/types/#slot) and [epochs](https://eth2book.info/capella/part3/config/types/#epoch), which divide it into deterministically fixed-size time chunks. +The first step is to determine when (which slot/epoch) duties need to be performed. This is done by the `scheduler` component. +It queries the beacon node to detect which validators defined in the cluster lock are active, and what duties they need to perform for +the upcoming epoch and slots. When such a slot starts, the `scheduler` emits an event indicating which validator needs to perform what duty. + +### Fetch and come to consensus on **what** data to sign + +A DV cluster consists of multiple operators each provided with one of the M-of-N threshold BLS private key shares per validator. +The key shares are imported into the validator clients which produce partial signatures. +Charon threshold aggregates these partial signatures before broadcasting them to the Beacon Chain. +*But to threshold aggregate partial signatures, each validator must sign the same data.* +The cluster must therefore coordinate and come to a consensus on what data to sign. + +`Fetcher` fetches the unsigned duty data from the beacon node upon receiving an event from `Scheduler`. +For attestations, this is the unsigned attestation, for block proposals, this is the unsigned block. + +The `Consensus` component listens to events from Fetcher and starts a [QBFT](https://docs.goquorum.consensys.net/configure-and-manage/configure/consensus-protocols/qbft/) consensus game with the other +Charon nodes in the cluster for that specific duty and slot. +When consensus is reached, the resulting unsigned duty data is stored in the `DutyDB`. 
+ +### **Wait** for the VC to sign + +Charon is a **middleware** distributed validator client. That means Charon doesn’t have access to the +validator private key shares and cannot sign anything on demand. +Instead, operators import the key shares into industry-standard validator clients (VC) +that are configured to connect to their local Charon client instead of their local Beacon node directly. + +Charon, therefore, serves the [Ethereum Beacon Node API](https://ethereum.github.io/beacon-APIs/#/) from the `ValidatorAPI` component and +intercepts some endpoints while proxying other endpoints directly to the upstream Beacon node. + +The VC queries the `ValidatorAPI` for unsigned data which is retrieved from the `DutyDB`. It then signs it and submits it +back to the `ValidatorAPI` which stores it in the `PartialSignatureDB`. + +### **Share** partial signatures + +The `PartialSignatureDB` stores the partially signed data submitted by the local Charon client’s VC. +But it also stores all the partial signatures submitted by the VCs of other peers in the cluster. +This is achieved by the `PartialSignatureExchange` component that exchanges partial signatures between all peers in the cluster. +All Charon clients, therefore, store all partial signatures the cluster generates. + +### **Threshold Aggregate** partial signatures + +The `SignatureAggregator` is invoked as soon as sufficient (any M of N) partial signatures are stored in the `PartialSignatureDB`. +It performs BLS threshold aggregation of the partial signatures resulting in a final signature that is valid for the beacon chain. + +### **Broadcast** final signature + +Finally, the `Broadcaster` component broadcasts the final threshold aggregated signature to the Beacon client, thereby completing the duty. + +### Ports + +The following is an outline of the services that can be exposed by Charon. + +- **:3600** - The validator REST API. 
This is the port that serves the consensus layer's [beacon node API](https://ethereum.github.io/beacon-APIs/). This is the port validator clients should talk to instead of their standard consensus client REST API port. Charon subsequently proxies these requests to the upstream consensus client specified by `--beacon-node-endpoints`. + +- **:3610** - Charon P2P port. This is the port that Charon clients use to communicate with one another via TCP. This endpoint should be port-forwarded on your router and exposed publicly, preferably on a static IP address. This IP address should then be set on the charon run command with `--p2p-external-ip` or `CHARON_P2P_EXTERNAL_IP`. + +- **:3620** - Monitoring port. This port hosts a webserver that serves Prometheus metrics on `/metrics`, a readiness endpoint on `/readyz` and a liveness endpoint on `/livez`, and a pprof server on `/debug/pprof`. This port should not be exposed publicly. + +## Getting started + +For more information on running Charon, take a look at our [Quickstart Guides](../../run/start/quickstart_overview.md). diff --git a/versioned_docs/version-v1.2.0/learn/charon/networking.mdx b/versioned_docs/version-v1.2.0/learn/charon/networking.mdx new file mode 100644 index 0000000000..16eb429cac --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/charon/networking.mdx @@ -0,0 +1,102 @@ +--- +description: Networking +sidebar_position: 4 +--- + +# Charon networking + +## Overview + +This document describes Charon's networking model which can be divided into two parts: the [*internal validator stack*](#internal-validator-stack) and the [*external p2p network*](#external-p2p-network). + +## Internal Validator Stack + +Internal Validator Stack
+ +Charon is a middleware DVT client and is therefore connected to an upstream beacon node and a downstream validator client is connected to it. +Each operator should run the whole validator stack (all 4 client software types), either on the same machine or on different machines. The networking between +the nodes should be private and not exposed to the public internet. + +Related Charon configuration flags: + +- `--beacon-node-endpoints`: Connects Charon to one or more beacon nodes. +- `--validator-api-address`: Address for Charon to listen on and serve requests from the validator client. + +## External P2P Network + +![External P2P Network](/img/ExternalP2PNetwork.png) +The Charon clients in a DV cluster are connected to each other via a small p2p network consisting of only the clients in the cluster. Peer IP addresses are +discovered via an external "relay" server. The p2p connections are over the public internet so the Charon p2p port must be publicly accessible. Charon leverages +the popular [libp2p](https://libp2p.io/) protocol. + +Related [Charon configuration flags](../../learn/charon/charon-cli-reference.md): + +- `--p2p-tcp-addresses`: Addresses for Charon to listen on and serve p2p requests. +- `--p2p-relays`: Connect Charon to one or more relay servers. +- `--private-key-file`: Private key identifying the Charon client. + +### LibP2P Authentication and Security + +Each Charon client has a secp256k1 private key. The associated public key is encoded into the [cluster lock file](cluster-configuration.md#cluster-lock-file) to identify the nodes in the cluster. +For ease of use and to align with the Ethereum ecosystem, Charon encodes these public keys in the [ENR format](https://eips.ethereum.org/EIPS/eip-778), +not in [libp2p’s Peer ID format](https://docs.libp2p.io/concepts/fundamentals/peers/). + +:::warning +Each Charon node's secp256k1 private key is critical for authentication and must be kept secure to prevent cluster compromise. 
 + +Do not use the same key across multiple clusters, as this can lead to security issues. + +For more on p2p security, refer to [libp2p's article](https://docs.libp2p.io/concepts/security/security-considerations). +::: + +Charon currently only supports libp2p tcp connections with [noise](https://noiseprotocol.org/) security and only accepts incoming libp2p connections from peers defined in the cluster lock. + +### LibP2P Relays and Peer Discovery + +Relays are simple libp2p servers that are publicly accessible supporting the [circuit-relay](https://docs.libp2p.io/concepts/nat/circuit-relay/) protocol. +Circuit-relay is a libp2p transport protocol that routes traffic between two peers over a third-party “relay” peer. + +Obol hosts a publicly accessible relay at https://0.relay.obol.tech and will work with other organisations in the community to host alternatives. Anyone can host their own relay server for their DV cluster. + +Each Charon node knows which peers are in the cluster from the ENRs in the cluster lock file, but their IP addresses are unknown. By connecting to the same relay, +nodes establish “relay connections” to each other. Once connected via relay they exchange their known public addresses via libp2p’s [identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) +protocol. The relay connection is then upgraded to a direct connection. If a node’s public IP changes, nodes once again connect via relay, exchange the new IP, and then connect directly once again. + +Note that in order for two peers to discover each other, they must connect to the same relay. Cluster operators should therefore coordinate which relays to use. + +Libp2p’s [identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify) protocol attempts to automatically detect the public IP address of a Charon +client without the need to explicitly configure it. 
If this however fails, the following two configuration flags can be used to explicitly set the publicly advertised +address: + +- `--p2p-external-ip`: Explicitly sets the external IP address. +- `--p2p-external-hostname`: Explicitly sets the external DNS host name. + +:::warning +If a pair of Charon clients are not publicly accessible, due to being behind a NAT, they will not be able to upgrade their relay connections to a direct connection. +Even though this is supported, it isn’t recommended as relay connections introduce additional latency and reduced throughput and will result in decreased validator effectiveness +and possible missed block proposals and attestations. +::: + +Libp2p’s circuit-relay connections are end-to-end encrypted, even though relay servers accept connections between nodes from multiple different clusters, relays are merely +routing opaque connections. And since Charon only accepts incoming connections from other peers in its cluster, the use of a relay doesn’t allow connections between clusters. + +Only the following three libp2p protocols are established between a Charon node and a relay itself: +- [circuit-relay](https://docs.libp2p.io/concepts/nat/circuit-relay/): To establish relay e2e encrypted connections between two peers in a cluster.
+- [identify](https://docs.libp2p.io/concepts/fundamentals/protocols/#identify): Auto-detection of public IP addresses to share with other peers in the cluster.
+- [peerinfo](https://github.com/ObolNetwork/charon/blob/main/app/peerinfo/peerinfo.go): Exchanges basic application [metadata](https://github.com/ObolNetwork/charon/blob/main/app/peerinfo/peerinfopb/v1/peerinfo.proto) for improved operational metrics and observability.
+ +All other Charon protocols are only established between nodes in the same cluster. + +### Scalable Relay Clusters + +In order for a Charon client to connect to a relay, it needs the relay's [multiaddr](https://docs.libp2p.io/concepts/fundamentals/addressing/) (containing its public key and IP address). +But a single multiaddr can only point to a single relay server which can easily be overloaded if too many clusters connect to it. Charon therefore supports resolving a relay’s multiaddr +via HTTP GET request. Since Charon also includes the unique `cluster-hash` header in this request, the relay provider can use +[consistent header-based load-balancing](https://cloud.google.com/load-balancing/docs/https/traffic-management-global#traffic_steering_header-based_routing) to map clusters to one of many relays using a single HTTP address. + +The relay supports serving its runtime public multiaddrs via its `--http-address` flag. + +E.g., https://0.relay.obol.tech is actually a load-balancer that routes HTTP requests to one of many relays based on the `cluster-hash` header returning the target relay’s multiaddr +which the Charon client then uses to connect to that relay. + +The charon `--p2p-relays` flag therefore supports both multiaddrs as well as HTTP URls. 
diff --git a/versioned_docs/version-v1.2.0/learn/futher-reading/_category_.json b/versioned_docs/version-v1.2.0/learn/futher-reading/_category_.json new file mode 100644 index 0000000000..c2df0de9cb --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/futher-reading/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Further reading", + "position": 3, + "collapsed": true +} diff --git a/versioned_docs/version-v1.2.0/learn/futher-reading/ethereum_and_dvt.md b/versioned_docs/version-v1.2.0/learn/futher-reading/ethereum_and_dvt.md new file mode 100644 index 0000000000..7a62ccaa77 --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/futher-reading/ethereum_and_dvt.md @@ -0,0 +1,54 @@ +--- +sidebar_position: 1 +description: Ethereum and its relationship with DVT +--- + +# Ethereum and Its Relationship With DVT + +Our goal for this page is to equip you with the foundational knowledge needed to actively contribute to the advancement of Obol while also directing you to valuable Ethereum and DVT related resources. Additionally, we will shed light on the intersection of DVT and Ethereum, offering curated articles and blog posts to enhance your understanding. + +## **Understanding Ethereum** + +To grasp the current landscape of Ethereum's PoS development, we encourage you to delve into the wealth of information available on the [Official Ethereum Website.](https://ethereum.org/en/learn/) +The Ethereum website serves as a hub for all things Ethereum, catering to individuals at various levels of expertise, whether you're just starting your journey or are an Ethereum veteran. Here, you'll find a trove of resources that cater to diverse learning needs and preferences, ensuring that there's something valuable for everyone in the Ethereum community to discover. 
+ +## **DVT & Ethereum** + +### Distributed Validator Technology + +> "Distributed validator technology (DVT) is an approach to validator security that spreads out key management and signing responsibilities across multiple parties, to reduce single points of failure, and increase validator resiliency. +> +> It does this by splitting the private key used to secure a validator across many computers organized into a "cluster". The benefit of this is that it makes it very difficult for attackers to gain access to the key, because it is not stored in full on any single machine. It also allows for some nodes to go offline, as the necessary signing can be done by a subset of the machines in each cluster. This reduces single points of failure from the network and makes the whole validator set more robust." (ethereum.org, 2023) + +#### Learn More About Distributed Validator technology from [The Official Ethereum Website](https://ethereum.org/en/staking/dvt/) + +### How Does DVT Improve Staking on Ethereum? + +If you haven’t yet heard, Distributed Validator Technology, or DVT, is the next big thing on The Merge section of the Ethereum roadmap. Learn more about this in our blog post: [What is DVT and How Does It Improve Staking on Ethereum?](https://blog.obol.tech/what-is-dvt-and-how-does-it-improve-staking-on-ethereum/) + +Image Alt Text + +***Vitalik's Ethereum Roadmap*** + +### Deep Dive Into DVT and Charon’s Architecture + +Minimizing correlation is vital when designing DVT as Ethereum Proof of Stake is designed to heavily punish correlated behavior. In designing Obol, we’ve made careful choices to create a trust-minimized and non-correlated architecture. 
+ +[**Read more about Designing Non-Correlation Here**](https://blog.obol.tech/deep-dive-into-dvt-and-charons-architecture/) + +### Performance Testing Distributed Validators +In our mission to help make Ethereum consensus more resilient and decentralised with distributed validators (DVs), it’s critical that we do not compromise on the performance and effectiveness of validators. Earlier this year, we worked with MigaLabs, the blockchain ecosystem observatory located in Barcelona, to perform an independent test to validate the performance of Obol DVs under different configurations and conditions. After taking a few weeks to fully analyse the results together with MigaLabs, we’re happy to share the results of these performance tests. + +[**Read More About The Performance Test Results Here**](https://blog.obol.tech/performance-testing-distributed-validators/) + +Image Alt Text + +### More Resources + +- [Sorting out Distributed Validator Technology](https://medium.com/nethermind-eth/sorting-out-distributed-validator-technology-a6f8ca1bbce3) +- [A tour of Verifiable Secret Sharing schemes and Distributed Key Generation protocols](https://medium.com/nethermind-eth/a-tour-of-verifiable-secret-sharing-schemes-and-distributed-key-generation-protocols-3c814e0d47e1) +- [Threshold Signature Schemes](https://medium.com/nethermind-eth/threshold-signature-schemes-36f40bc42aca) + +#### References + +- ethereum.org. (2023). Distributed Validator Technology. [online] Available at: https://ethereum.org/en/staking/dvt/ [Accessed 25 Sep. 2023]. 
diff --git a/versioned_docs/version-v1.2.0/learn/futher-reading/peer_score.md b/versioned_docs/version-v1.2.0/learn/futher-reading/peer_score.md new file mode 100644 index 0000000000..8cde00e6d1 --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/futher-reading/peer_score.md @@ -0,0 +1,47 @@ +--- +sidebar_position: 3 +description: Measuring Individual Performance in Distributed Validators +--- + +# Peer Score + +## Introduction + +Validator effectiveness is a critical metric for assessing the health of a rated network. It determines how well validators perform their attestation and block proposal duties. Existing solutions, like RAVER (Rated Validator Effectiveness Rating), provide an effectiveness score of a validator. In a monolithic validator that is run by a single operator, validator effectiveness can be considered as a proxy for the effectiveness or “score” of that operator. However, this approach falls short when dealing with distributed validators (DVs) maintained by multiple operators. + +Peer Score v0 addresses this limitation by introducing a method to evaluate the performance of individual operators within a DV. This enables a more granular assessment of contribution within a distributed setting. + +## Key Concepts + +- **Distributed Validator (DV):** A validator maintained by a group of operators in a fault-tolerant manner. +- **Peer:** An individual operator contributing to a DV. +- **Peer Score:** A metric reflecting the performance of a peer within a DV, calculated as the ratio of completed duties to expected duties. +- **Operator Score:** An aggregated metric representing the overall effectiveness of an operator across multiple DVs (planned for future iterations). + +## Challenges with RAVER in DVs + +RAVER assigns a single effectiveness score to the entire DV. This score doesn't reflect the individual contributions of operators within the group. 
For example, a DV with 95% effectiveness maintained by four operators (A, B, C, and D) doesn't guarantee that each operator has a 95% effectiveness score. It's possible that even if operator D is frequently offline, the remaining operators (A, B, and C) can maintain the overall DV effectiveness. + +## Peer Score v0 Calculation + +Peer Score v0 utilizes a straightforward formula: + +`Peer Score = (Total duties completed by peer) / (Total duties expected by peer)` + +This ratio reflects the peer's adherence to its assigned duties within the DV. + +## Future Iterations + +Peer Score v0 lays the foundation for a more comprehensive evaluation system. Planned advancements include: + +- **Weighted Duties:** Assigning varying weights to different duties based on their significance to the network. +- **Decentralization Scores:** Integrating metrics that consider the decentralization of clients and operator locations. +- Peer rating: an anonymous rating peers can give to their other peers to grade their social co-ordination. + +## Use Cases + +Peer Score offers valuable insights for various stakeholders: + +- **Staking/Restaking Protocols:** Peer Score is a crucial component of Obol’s Techne Credential Program. LSPs and LRPs can utilize Techne Credentials, and hence Peer Score, to identify efficient operators for expanding their operator sets. +- **DV Operators:** Forming operator collectives based on peer effectiveness and potentially removing underperforming peers from DVs (with Charon v2 cluster mutability). +- **DV Software Developers:** Establishing a standardized metric for evaluating operator performance across various DV software, enabling the development of new tools and services. 
diff --git a/versioned_docs/version-v1.2.0/learn/futher-reading/resources.md b/versioned_docs/version-v1.2.0/learn/futher-reading/resources.md new file mode 100644 index 0000000000..c0e1487097 --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/futher-reading/resources.md @@ -0,0 +1,54 @@ +--- +sidebar_position: 4 +description: A collection of links to products and content relating to Distributed Validators. +--- + +# Useful Links +The following is a curated list of the best internal and external resources for using, creating, running, building, and researching Distributed Validators. To add to this list, please open a [pull request](https://github.com/ObolNetwork/obol-docs/pulls/). + + +## Deposit Interfaces +- [Chorus One](https://opus.chorus.one/pool/stake/) +- [Stakely](https://obol-portal.stakely.io/) +- [Mellow](https://app.mellow.finance/restake/ethereum-dvsteth) + + +## Launchers and Deployment Tooling + +- [Dappnode](https://docs.dappnode.io/docs/user/staking/ethereum/dvt-technologies/obol-network/) +- [Stereum](https://stereum.net/) +- [Sedge](https://github.com/ObolNetwork/sedge/blob/develop/docs/docs/quickstart/charon.mdx) +- [Obol CDVN](https://github.com/ObolNetwork/charon-distributed-validator-node) +- [Obol K8s](https://github.com/ObolNetwork/charon-k8s-distributed-validator-node) +- [Obol Helm Charts](https://github.com/ObolNetwork/helm-charts) +- [Obol Ansible Playbooks](https://github.com/ObolNetwork/obol-ansible) +- [Terraform Charon Relay](https://github.com/ObolNetwork/terraform-charon-relay) +- [Terraform Grafana Charon dashboards](https://github.com/ObolNetwork/terraform-grafana-dashboards) + +## Quickstart Guides +- [Run a DV alone](../../../docs/run/start/quickstart_alone.mdx) +- [Run a DV as a group](../../../docs/run/start/quickstart_group.mdx) +- [Run a DV using the SDK](../../../docs/adv/advanced/quickstart-sdk.mdx) + +## Security and Best Practices +- [Audits](https://github.com/ObolNetwork/obol-security/tree/main/audits) +- 
[Security repo](https://github.com/ObolNetwork/obol-security) +- [Security Docs Page](../../../docs/adv/security/overview.md) +- [Best practices doc](../../../docs/run/prepare/deployment-best-practices.md) +- [Status Page](https://status.obol.org/) + +## Security Audits and Assessments +- A [review](../../../docs/adv/security/ev-assessment.md) of Obol Labs development processes by Ethereal Ventures +- A [security assessment](https://github.com/ObolNetwork/obol-security/blob/f9d7b0ad0bb8897f74ccb34cd4bd83012ad1d2b5/audits/Sigma_Prime_Obol_Network_Charon_Security_Assessment_Report_v2_1.pdf) of Charon by [Sigma Prime](https://sigmaprime.io/). +- A [solidity audit](../../../docs/adv/security/smart_contract_audit.mdx) of the Obol Splits contracts by [Zach Obront](https://zachobront.com/). +- [Charon Threat model](../../../docs/adv/security/threat_model.md) +- [QuantStamp Charon audit Q4 2023](https://obol.tech/charon_quantstamp_assessment.pdf) + +## Research and Development +- Nethermind research papers via the [Obol Network Research Forum](https://community.obol.tech/?ref=blog.obol.org) + - [Publicly Verifiable Secret Sharing-based Distributed Key Generation](https://community.obol.tech/t/proposal-publicly-verifiable-secret-sharing-based-distributed-key-generation/94?ref=blog.obol.org) + - [Key Refresh Scheme for DV operators](https://community.obol.tech/t/proposal-key-refresh-scheme-for-dv-operators/97?ref=blog.obol.org) + - [BFT protocol that can mutate its operator set in a byzantine setting](https://community.obol.tech/t/proposal-bft-protocol-that-can-mutate-its-operator-set-in-a-byzantine-setting/106?ref=blog.obol.org) + - [Using DV clusters for encrypted transaction mempools](https://community.obol.tech/t/proposal-using-dv-clusters-for-encrypted-transaction-mempools/108?ref=blog.obol.org) + - Attributable Consensus Solution for DV Clusters [Part I](https://community.obol.org/t/proposal-attributable-consensus-solution-for-dv-clusters/104?ref=blog.obol.org), [Part 
II](https://community.obol.org/t/proposal-attributable-consensus-solution-for-dv-clusters-part-2/107?ref=blog.obol.org), [Part III](https://community.obol.org/t/proposal-attributable-consensus-solution-for-dv-clusters-part-3/109?ref=blog.obol.org), [Appendix](https://community.obol.org/t/proposal-attributable-consensus-solution-for-dv-clusters-appendix/110?ref=blog.obol.org) +- [Obol-Lido Splits Dune Dashboard](https://dune.com/obol_labs/lido-splits) \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/learn/futher-reading/testnet.md b/versioned_docs/version-v1.2.0/learn/futher-reading/testnet.md new file mode 100644 index 0000000000..c763618420 --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/futher-reading/testnet.md @@ -0,0 +1,189 @@ +--- +sidebar_position: 2 +description: Community testing efforts +--- + +# Community Testing + +:::tip +This page looks at the community testing efforts organised by Obol to test Distributed Validators at scale. If you are looking for guides to run a Distributed Validator on testnet you can do so [here](../../run/start/quickstart_overview.md). +::: + +Over the last number of years, DV Labs has coordinated and hosted progressively larger testing efforts to help harden the Charon client and iterate on the key generation tooling. + +Below is a breakdown of the testing initiatives, the features targeted for completion in each testnet, along with their respective completion dates and durations. 
+ +# Testing Programs on Testnet + +Listed from most recent to oldest: + +- [x] [Lido Testnet 3 - SimpleDVT](#lido-testnet-wave-3---simpledvt) +- [x] [Lido Testnet 2](#lido-testnet-wave-2) +- [x] [Lido Testnet 1 - Pilot](#lido-testnet-wave-1---pilot) +- [x] [Bia Public Testnet 2](#bia-public-testnet-2) +- [x] [Athena Public Testnet 1](#athena-public-testnet-1) +- [x] [Dev Net 2](#devnet-2) +- [x] [Dev Net 1](#devnet-1) + +## Lido Testnet Wave 3 - SimpleDVT + +Official report [available here](https://obol.org/lido_obol_3.pdf). The metrics presented were derived from a 45-day monitoring period starting on November 18th, 2023. Each cluster initially ran 5 validators, a number that was subsequently scaled up to 50 and then 100 for most clusters. Throughout the testing, various challenges were encountered, primarily stemming from infrastructure limitations due to the early-stage development of the Holesky testnet. Throughout this monitoring phase, the aggregate metrics of Obol DVT clusters surpassed both Lido’s minimum requirements and the Holesky network averages. + +**Participants:** Professional and community operators. Initially 214 participants, which later fell to 196. + +**State:** Pre-release + +**Network:** Holesky + +**Completed date:** Jan 11th, 2024 + +**Duration:** 2 months (Nov 28th, 2023 - Jan 11th, 2024) + +**Goals:** + +- Engage a broad set of node operators. +- Operate a high number of validators on each cluster. +- Gather performance data on potential candidates for Lido SimpleDVT onboarding. +- Conduct large-scale testing within Lido's framework. +- Demonstrate good performance, even with a large degree of geographic, client, and hardware diversity. + +## Lido Testnet Wave 2 + +Official report [available here](https://obol.org/lido_obol_2.pdf). Our testing period spanned 59 days, from March 23rd to May 20th, 2023. During this time, we focused on key metrics for our Lido clusters, also drawing comparisons with industry peers. 
We're excited to share that the data displays strong performance of our DVT clusters as we continue to improve and enhance our middleware client, Charon. + +**Participants:** >50 community professional and community node operators. + +**State:** MVP + +**Network:** Görli + +**Completed date:** May 2023 + +**Duration:** 3 months (March - May 2023) + +**Goals:** + +- Engage a broad set of node operators. +- Conduct large-scale testing within Lido's framework. +- Demonstrate good performance, even with a large degree of geographic, client, and hardware diversity. + +## Lido Testnet Wave 1 - Pilot + +Official report [available here](https://obol.org/lido_obol_1.pdf). Gathered key metrics from our Lido clusters, benchmarking these metrics against other +industry players, showing strong results and reaffirming our confidence in the +future of the technology. + +**Participants:** Professional node operators: Hashquark, CryptoManufaktur, Nethermind, Simply Staking, DSRV, Kukis Global, Chorus One, Staking Facilities, Blockscape, Everstake, Stakely. + +**State:** MVP + +**Network:** Görli + +**Completed date:** January 2023 + +**Duration:** 104 days (Oct 3rd, 2022 - Jan 15th, 2023) + +**Goals:** + +- Engage Lido and Lido node operators with DVT. +- Assist Lido to build out a testing program framework which can be repeated at a larger scale. +- Test up to 1000 active validators within each cluster. + +## Bia Public Testnet 2 + +This second public testnet intends to take the learning from Athena and scale the network by engaging both the wider at-home validator community and professional operators. This is the first time users are setting up DVs using the DV launchpad. + +This testnet is also important for learning the conditions Charon will be subjected to in production. A core output of this testnet is a large number of autonomous public DV clusters running and building up the Obol community with technical ambassadors. 
+ +**Participants:** Obol Community, Ethereum staking community + +**State:** MVP + +**Network:** Görli + +**Completed date:** March 2023 + +**Duration:** 2 weeks cluster setup, 4-8 weeks operation + +**Goals:** + +- Engage the wider Solo and Professional Ethereum Staking Community. +- Get integration feedback. +- Build confidence in Charon after running DVs on an Ethereum testnet. +- Learn about the conditions Charon will be subjected to in production. +- Distributed Validator returns are competitive versus single validator clients. +- Make deploying Ethereum validator nodes accessible using the DV Launchpad. +- Build comprehensive guides for various profiles to spin up DVs with minimal supervision from the core team. + +## Athena Public Testnet 1 + +With tutorials for solo and group flows having been developed and refined. The goal for public testnet 1 was to get distributed validators into the hands of the wider Obol Community for the first time. The core focus of this testnet was the onboarding experience. + +The core output from this testnet was a significant number of public cluster running and public feedback collected. + +This was an unincentivized testnet and formed the basis for us to figure out a Sybil resistance mechanism. + +**Participants:** Obol Community + +**State:** Bare Minimum + +**Network:** Görli + +**Completed date:** October 2022 + +**Duration:** 2 weeks cluster setup, 8 weeks operation + +**Goals:** + +- Get distributed validators into the hands of the Obol Early Community for the first time. +- Create the first public onboarding experience and gather feedback. This is the first time we need to provide comprehensive instructions for as many platforms (Unix, Mac, Windows) as possible. +- Make deploying Ethereum validator nodes accessible using the CLI. +- Generate a backlog of bugs, feature requests, platform requests and integration requests. 
+ +## Devnet 2 + +The second devnet aimed to have a number of trusted operators test out our earliest tutorial flows **together** for the first time. + +The aim was for groups of 4 testers to complete a group onboarding tutorial, using `docker compose` to spin up 4 Charon clients and 4 different validator clients (Lighthouse, Teku, Lodestar and Vouch), each on their own machine located either at the operator's home or a location of their choice, while running at least a kiln consensus client. + +This devnet was the first time `charon dkg` was tested with users. A core focus of this devnet was to collect network performance data. + +This was also the first time Charon was run in variable, non-virtual networks (i.e. the real internet). + +**Participants:** Obol Dev Team, Client team advisors. + +**State:** Pre-product + +**Network:** Kiln + +**Completed Date:** July 2022 + +**Duration:** 2 weeks + +**Goals:** + +- Groups of 4 testers complete a group onboarding tutorial, using `docker compose` to spin up 4 Charon clients, each on their own machine located either at the operator's home or a location of their choice, while running at least a kiln consensus client. +- Operators avoid exposing Charon to the public internet on a static IP address through the use of Obol-hosted relay nodes. +- Users test `charon dkg`. The launchpad is not used, and this dkg is triggered by a manifest config file created locally by a single operator using the `charon create dkg` command. +- Effective collection of network performance data, to enable gathering even higher signal performance data at scale during public testnets. +- Block proposals are in place. + +## Devnet 1 + +The first devnet aimed to have a number of trusted operators test out our earliest tutorial flows. The aim was for a single user to complete the tutorials alone, using `docker compose` to spin up 4 Charon clients, and 4 different validator clients on a single machine, using a remote consensus client. 
The keys were created locally in Charon and activated with the existing launchpad. + +**Participants:** Obol Dev Team, Client team advisors. + +**State:** Pre-product + +**Network:** Kiln + +**Completed Date:** June 2022 + +**Duration:** 1 week + +**Goals:** + +- A single user completes the first tutorial alone, using `docker compose` to spin up 4 Charon clients on a single machine, with a remote consensus client. The keys are created locally in Charon and activated with the existing launchpad. +- Prove that the distributed validator paradigm with 4 separate VC implementations together operating as one logical validator works. +- Establish basic monitoring systems in preparation for the next testnet, where accurate monitoring will be crucial as Charon operates across a network. \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/learn/intro/_category_.json b/versioned_docs/version-v1.2.0/learn/intro/_category_.json new file mode 100644 index 0000000000..baa4b331b2 --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/intro/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Introduction", + "position": 1, + "collapsed": true +} diff --git a/versioned_docs/version-v1.2.0/learn/intro/faq.mdx b/versioned_docs/version-v1.2.0/learn/intro/faq.mdx new file mode 100644 index 0000000000..fe4f9631d7 --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/intro/faq.mdx @@ -0,0 +1,189 @@ +--- +sidebar_position: 6 +description: Frequently asked questions +--- + +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +# Frequently Asked Questions + +## General + +### Does Obol have a token? + +No. Distributed validators use only Ether. + +### Where can I learn more about Distributed Validators? + +Have you checked out our [blog site](https://blog.obol.tech) and [twitter](https://twitter.com/ObolNetwork) yet? Maybe join our [discord](https://discord.gg/n6ebKsX46w) too. + +### Where does the name Charon come from? 
+ +[Charon](https://www.theoi.com/Khthonios/Kharon.html) [kharon] is the Ancient Greek Ferryman of the Dead. He was tasked with bringing people across the Acheron river to the underworld. His fee was one Obol coin, placed in the mouth of the deceased. This tradition of placing a coin or Obol in the mouth of the deceased continues to this day across the Greek world. + +### What are the hardware requirements for running a Charon node? + +Charon alone uses negligible disk space of not more than a few MBs. However, if you are running your consensus client and execution client on the same server as Charon, then you will typically need the same hardware as running a full Ethereum node: + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Charon + VCBeacon Node
CPU*12
RAM216
Storage100 MB2 TB
Internet Bandwidth10 Mb/s10 Mb/s
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
Charon + VCBeacon Node
CPU*24
RAM324
Storage100 MB2 TB
Internet Bandwidth25 Mb/s25 Mb/s
+
+ + + + + + + + + + + + + + + + + + + + + + + + + + + +
Charon + VCBeacon Node
CPU*28
RAM432
Storage100 MB2 TB
Internet Bandwidth100 Mb/s100 Mb/s
+
+
+ +*if using vCPU, aim for 2x the above amounts + +For more hardware considerations, check out the [ethereum.org guides](https://ethereum.org/en/developers/docs/nodes-and-clients/run-a-node/#environment-and-hardware) which explores various setups and trade-offs, such as running the node locally or in the cloud. + +For now, Geth, Teku & Lighthouse clients are packaged within the docker compose file provided in the [quickstart guides](../../run/start/quickstart_overview.md), so you don't have to install anything else to run a cluster. Just make sure you give them some time to sync once you start running your node. + +### What is the difference between a node, a validator and a cluster? + +A node is a single instance of Ethereum EL+CL clients that can communicate with other nodes to maintain the Ethereum blockchain. + +A validator is a node that participates in the consensus process by verifying transactions and creating new blocks. Multiple validators can run from the same node. + +A cluster is a group of nodes that act together as one or several validators which allows for a more efficient use of resources, reduces operational costs, and provides better reliability and fault tolerance. + +### Can I migrate an existing Charon node to a new machine? + +It is possible to migrate your Charon node to another machine running the same config by moving the `.charon` folder with its contents to your new machine. Make sure the EL and CL on the new machine are synced before proceeding to the move to minimize downtime. + +## Distributed Key Generation + +### What are the min and max numbers of operators for a Distributed Validator? + +Currently, the minimum is 4 operators with a threshold of 3. + +The threshold (aka quorum) corresponds to the minimum numbers of operators that need to be active for the validator(s) to be able to perform its duties. It is defined by the following formula `n-(ceil(n/3)-1)`. 
We strongly recommend using this default threshold in your DKG as it maximises liveness while maintaining BFT safety. Setting a 4 out of 4 cluster for example, would make your validator more vulnerable to going offline instead of less vulnerable. You can check the recommended threshold values for a cluster [here](../../learn/intro/key-concepts.md#distributed-validator-threshold). + +## Obol Splits + +### What are Obol Splits? + +Obol Splits refers to a collection of composable smart contracts that enable the splitting of validator rewards and/or principal in a non-custodial, trust-minimised manner. Obol Splits contains integrations to enable DVs within Lido, Eigenlayer, and in future a number of other LSPs. + +### Are Obol Splits non-custodial? + +Yes. Unless you were to decide to [deploy an editable splitter contract](#can-i-change-the-percentages-in-a-split), Obol Splits are immutable, non-upgradeable, non-custodial, and oracle-free. + +### Can I change the percentages in a split? + +Generally Obol Splits are deployed in an immutable fashion, meaning you cannot edit the percentages after deployment. However, if you were to choose to deploy a *controllable* splitter contract when creating your Split, then yes, the address you select as controller can update the split percentages arbitrarily. A common pattern for this use case is to use a Gnosis SAFE as the controller address for the split, giving a group of entities (usually the operators and principal provider) the ability to update the percentages if need be. A well known example of this pattern is the [Protocol Guild](https://protocol-guild.readthedocs.io/en/latest/03-onchain-architecture.html#). + +### How do Obol Splits work? + +You can read more about how Obol Splits work [here](../../learn/intro/obol-splits.mdx). + +### Are Obol Splits open source? + +Yes, Obol Splits are licensed under GPLv3 and the source code is available [here](https://github.com/ObolNetwork/obol-splits). + +### Are Obol Splits audited? 
+ +The Obol Splits contracts have been audited, though further development has continued on the contracts since. Consult the audit results [here](../../adv/security/smart_contract_audit.mdx). + +### Are the Obol Splits contracts verified on Etherscan? + +Yes, you can view the verified contracts on Etherscan. A list of the contract deployments can be found [here](https://github.com/ObolNetwork/obol-splits?#deployment). + +### Does my cold wallet have to call the Obol Splits contracts? + +No. Any address can trigger the contracts to move the funds, they do not need to be a member of the Split either. You can set your cold wallet/custodian address as the recipient of the principal and rewards, and use any hot wallet to pay the gas fees to push the ether into the recipient address. + +### Are there any edge cases I should be aware of when using Obol Splits? + +The most important decision is to be aware of whether or not the Split contract you are using has been set up with editability. If a splitter is editable, you should understand what the address that can edit the split does. Is the editor an EOA? Who controls that address? How secure is their seed phrase? Is it a smart contract? What can that contract do? Can the controller contract be upgraded? etc. Generally, the safest thing in Obol's perspective is not to have an editable splitter, and if in future you are unhappy with the configuration, that you exit the validator and create a fresh cluster with new settings that fit your needs. + +Another aspect to be aware of is how the splitting of principal from rewards works using the Optimistic Withdrawal Recipient contract. There are edge cases relating to not calling the contracts periodically or ahead of a withdrawal, activating more validators than the contract was configured for, and a worst case mass slashing on the network. 
Consult the documentation on the contract [here](../../learn/intro/obol-splits.mdx#optimistic-withdrawal-recipient), its audit [here](../../adv/security/smart_contract_audit.mdx), and follow up with the core team if you have further questions. + +## Debugging Errors in Logs + +You can check if the containers on your node are outputting errors by running `docker compose logs` on a machine with a running cluster. + +Diagnose some common errors and view their resolutions [here](../../adv/troubleshooting/errors.mdx). diff --git a/versioned_docs/version-v1.2.0/learn/intro/key-concepts.md b/versioned_docs/version-v1.2.0/learn/intro/key-concepts.md new file mode 100644 index 0000000000..884c9d6283 --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/intro/key-concepts.md @@ -0,0 +1,110 @@ +--- +sidebar_position: 2 +description: Some of the key terms in the field of Distributed Validator Technology +--- + +# Key Staking Concepts + +This page outlines a number of the key concepts behind the various technologies that Obol is developing. + +## Distributed validator + +![A Distributed Validator](/img/32Eth.png) + +A distributed validator is an Ethereum proof-of-stake validator that runs on more than one node/machine. This functionality is possible with the use of **Distributed Validator Technology** (DVT). + +Distributed validator technology removes some of the single points of failure in validation. Should <33% of the participating nodes in a DV cluster go offline, the remaining active nodes can still come to consensus on what to sign and can produce valid signatures for their staking duties. This is known as Active/Active redundancy, a common pattern for minimizing downtime in mission critical systems. + +## Distributed Validator Node + +![A Distributed Validator Node](/img/DVNode.png) + +A distributed validator node is the set of clients an operator needs to configure and run to fulfil the duties of a Distributed Validator Operator. 
An operator may also run redundant execution and consensus clients, an execution payload relayer like [mev-boost](https://github.com/flashbots/mev-boost), or other monitoring or telemetry services on the same hardware to ensure optimal performance. + +In the above example, the stack includes Geth, Lighthouse, Charon and Teku. + +### Execution Client + +![A Geth Client](/img/POWNodeV2.png) + +An execution client (formerly known as an Eth1 client) specializes in running the EVM and managing the transaction pool for the Ethereum network. These clients provide execution payloads to consensus clients for inclusion into blocks. + +Examples of execution clients include: + +- [Go-Ethereum](https://geth.ethereum.org/) +- [Nethermind](https://docs.nethermind.io/) +- [Erigon](https://github.com/ledgerwatch/erigon) + +### Consensus Client + +![A Geth Client](/img/POSClient.png) + +A consensus client's duty is to run the proof of stake consensus layer of Ethereum, often referred to as the beacon chain. + +Examples of Consensus clients include: + +- [Prysm](https://docs.prylabs.network/docs/how-prysm-works/beacon-node) +- [Teku](https://docs.teku.consensys.net/en/stable/) +- [Lighthouse](https://lighthouse-book.sigmaprime.io/api-bn.html) +- [Nimbus](https://nimbus.guide/) +- [Lodestar](https://github.com/ChainSafe/lodestar) + +### Distributed Validator Client + +![A Charon Client](/img/CharonBrick.png) + +A distributed validator client intercepts the validator client ↔ consensus client communication flow over the [standardised REST API](https://ethereum.github.io/beacon-APIs/#/ValidatorRequiredApi), and focuses on two core duties: + +- Coming to consensus on a candidate duty for all validators to sign. +- Combining signatures from all validators into a distributed validator signature. + +The only example of a distributed validator client built with a non-custodial middleware architecture to date is [Charon](../charon/intro). 
+ +### Validator Client + +![A Lighthouse Client](/img/ValidatorBrick.png) + +A validator client is a piece of code that operates one or more Ethereum validators. + +Examples of validator clients include: + +- [Vouch](https://www.attestant.io/posts/introducing-vouch/) +- [Prysm](https://docs.prylabs.network/docs/how-prysm-works/prysm-validator-client/) +- [Teku](https://docs.teku.consensys.net/en/stable/) +- [Lighthouse](https://lighthouse-book.sigmaprime.io/api-vc.html) + +## Distributed Validator Cluster + +![A Distributed Validator Cluster](/img/DVCluster.png) + +A distributed validator cluster is a collection of distributed validator nodes connected together to service a set of distributed validators generated during a DVK ceremony. + +### Distributed Validator Key + +![A Distributed Validator Key](/img/ThresholdSigning.png) + +A distributed validator key is a group of BLS private keys, that together operate as a threshold key for participating in proof of stake consensus. + +### Distributed Validator Key Share + +One piece of the distributed validator private key. + +### Distributed Validator Threshold + +The number of nodes in a cluster that needs to be online and honest for their distributed validators to be online is outlined in the following table. + +| Cluster Size | Threshold | Note | +|:------------:|:---------:|:------------------| +| 4 | 3/4 | Minimum threshold | +| 5 | 4/5 | | +| 6 | 4/6 | Minimum to tolerate two offline nodes| +| 7 | 5/7 | Minimum to tolerate two **malicious** nodes | +| 8 | 6/8 | | +| 9 | 6/9 | Minimum to tolerate three offline nodes | +| 10 | 7/10 | Minimum to tolerate three **malicious** nodes | + +### Distributed Validator Key Generation Ceremony + +To achieve fault tolerance in a distributed validator, the individual private key shares need to be generated together. 
Rather than have a trusted dealer produce a private key, split it and distribute it, the preferred approach is to never construct the full private key at any point, by having each operator in the distributed validator cluster participate in what is known as a Distributed Key Generation ceremony. + +A distributed validator key generation ceremony is a type of DKG ceremony. A ceremony produces signed validator deposit and exit data, along with all of the validator key shares and their associated metadata. Read more about these ceremonies [here](../charon/dkg). diff --git a/versioned_docs/version-v1.2.0/learn/intro/launchpad.md b/versioned_docs/version-v1.2.0/learn/intro/launchpad.md new file mode 100644 index 0000000000..92e2a9c246 --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/intro/launchpad.md @@ -0,0 +1,27 @@ +--- +description: A dapp to securely create Distributed Validators alone or with a group. +sidebar_position: 5 +--- + +# DV Launchpad + +![DV Launchpad Promo Image](/img/DistributeYourValidators.svg) + +In order to activate an Ethereum validator, 32 ETH must be deposited into the official deposit contract. + +The vast majority of users that created validators to date have used the **[~~Eth2~~ Staking Launchpad](https://launchpad.ethereum.org/)**, a public good open source website built by the Ethereum Foundation alongside participants who later went on to found Obol. This tool has been wildly successful in the safe and educational creation of a significant number of validators on the Ethereum mainnet. + +To facilitate the generation of distributed validator keys amongst remote users with high trust, the Obol Network developed and maintains a website that enables a group of users to come together and create these threshold keys: **The DV Launchpad**. + +## Getting started + +For more information on running Charon in a UI friendly way through the DV Launchpad, take a look at our [Quickstart Guides](../../run/start/quickstart_overview.md). 
+ +## DV Launchpad Links + +| Ethereum Network | Launchpad | +|--------------|-------------------------------------| +| Mainnet | https://launchpad.obol.org | +| Gnosis Chain | https://gnosischain.launchpad.obol.org | +| Holesky | https://holesky.launchpad.obol.org | +| Sepolia | https://sepolia.launchpad.obol.org | diff --git a/versioned_docs/version-v1.2.0/learn/intro/obol-collective.md b/versioned_docs/version-v1.2.0/learn/intro/obol-collective.md new file mode 100644 index 0000000000..78bbaf126d --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/intro/obol-collective.md @@ -0,0 +1,34 @@ +--- +sidebar_position: 1 +description: The Obol Collective +--- + +# Obol Collective + +## What is the Obol Collective? + +The Obol Collective is a collection of tools, teams, and communities dedicated to scaling Ethereum by strengthening the security, resiliency, and decentralisation of the consensus layer through the development and deployment of distributed validators. The Distributed Validator middleware client, Charon, boosts the security, resilience, and decentralisation of the Ethereum validator network by enabling “squad staking”. The Collective is fueled by Obol’s economic model, which directs 1% of staking rewards from DVs to ecosystem projects via retroactive funding - a positive flywheel to accelerate adoption of DVs and scale the consensus layer. + +The list of Obol Collective participants includes 50+ staking protocols, client teams, software tools, education & community projects, professional node operators, home operators, and stakers, including names like EigenLayer, Lido, EtherFi, Figment, Bitcoin Suisse, Stakewise, Nethermind, Blockdaemon, Chorus One, DappNode, and many more. Learn more at [Obol.org](https://obol.org). + +## What is DV Labs?​ + +DV Labs (originally “Obol Labs”) is one of the core research and software development teams building DVT for Ethereum. 
DV Labs’ mission is to build shared web3 technologies for Ethereum infrastructure operators, to establish a credibly neutral and trust-minimised infrastructure layer. DV Labs’ Distributed Validator middleware client, Charon, boosts the security, resilience, and decentralisation of the Ethereum validator network by enabling “squad staking”. Learn more at [DVLabs.tech](https://dvlabs.tech). + +## Scaling Ethereum’s Consensus Layer​ + +Obol is focused on scaling consensus by providing permissionless access to Distributed Validators (DVs), which offer not only protection against client issues and key mismanagement, but also byzantine fault tolerance. We believe that distributed validators should and will make up a large portion of mainnet validator configurations. The transition of the Ethereum community to DVs will enable a new trust paradigm, finally allowing the validator to become the most credible cash flow stream in the world. + +Similar to how roll-up technology laid the foundation for L2 scaling implementations, we believe DVT will do the same for scaling consensus while preserving decentralization. Layers like Obol are critical to the long term viability and resiliency of public networks like Ethereum. We believe DVT will evolve into a widely used primitive and will ensure the security, resiliency, and decentralization of the public blockchain networks that adopt it. + +### Sustainable Public Goods + +Obol is inspired by previous work on Ethereum public goods and experimenting with circular economics. We believe that to unlock innovation in staking use cases, a [credibly neutral](https://blog.obol.org/why-we-built-charon-as-a-middleware/) layer must exist for innovation to flow and evolve vertically. 
The Obol staking stack consists of four core public goods: +- [Charon](../../learn/charon/intro.md), a middleware client that enables validators to run in a fault-tolerant, distributed manner; +- The [Distributed Validator Launchpad](../../learn/intro/launchpad.md), a user interface for configuring Distributed Validators; +- [Obol Splits](../../learn/intro/obol-splits.mdx), a set of solidity smart contracts for the distribution of rewards from Distributed Validators; +- Obol [SDK](../../sdk/index.md) & [API](https://docs.obol.org/api), allowing Distributed Validator clusters to be configured and run at scale, for example within staking protocols. + +The [launch of the Obol Collective](https://blog.obol.org/announcing-the-obol-collective/) began the journey to becoming an open, community governed, self-sustaining project. Together we will incentivize, build, and maintain distributed validator technology that makes public networks a more secure and resilient foundation to build on top of. Read more about our economic model in our [One Percent For Decentralisation](https://blog.obol.tech/1-percent-for-decentralisation/) announcement. diff --git a/versioned_docs/version-v1.2.0/learn/intro/obol-splits.mdx b/versioned_docs/version-v1.2.0/learn/intro/obol-splits.mdx new file mode 100644 index 0000000000..685931d6aa --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/intro/obol-splits.mdx @@ -0,0 +1,100 @@ +--- +sidebar_position: 4 +description: Smart contracts for managing Distributed Validators +--- + +# Obol Splits + +Obol develops and maintains a suite of smart contracts for use with Distributed Validators. These contracts include: + +- Withdrawal Recipients: Contracts used for a validator's withdrawal address. +- Split contracts: Contracts to split ether across multiple entities. Developed by [Splits.org](https://splits.org) +- Split controllers: Contracts that can mutate a splitter's configuration. 
 + +Two key goals of validator reward management are: + +1. To be able to differentiate reward ether from principal ether such that node operators can be paid a percentage of the _reward_ they accrue for the principal provider rather than a percentage of _principal+reward_. +2. To be able to withdraw the rewards in an ongoing manner without exiting the validator. + +Without access to the consensus layer state in the EVM to check a validator's status or balance, and due to the incoming ether being from an irregular state transition, neither of these requirements is easily satisfiable. + +The following sections outline different contracts that can be composed to form a solution for one or both goals. + +## Withdrawal Recipients + +Validators have two streams of revenue, the consensus layer rewards and the execution layer rewards. Withdrawal Recipients focus on the former, receiving the balance skimming from a validator with >32 ether in an ongoing manner, and receiving the principal of the validator upon exit. + +### Optimistic Withdrawal Recipient + + + Optimistic Withdrawal Recipient graphic + + +This is the primary withdrawal recipient Obol uses, as it allows for the separation of reward from principal, as well as permitting the ongoing withdrawal of accruing rewards. + +An Optimistic Withdrawal Recipient [contract](https://github.com/ObolNetwork/obol-splits/blob/main/src/owr/OptimisticWithdrawalRecipient.sol) takes three inputs when deployed: + +- A _principal_ address: The address that controls where the principal ether will be transferred post-exit. +- A _reward_ address: The address where the accruing reward ether is transferred to. +- The amount of ether that makes up the principal. + +This contract **assumes that any ether that has appeared in its address since it was last able to do balance accounting is skimming reward from an ongoing validator** (or number of validators) unless the change is > 16 ether. 
This means balance skimming is immediately claimable as reward, while an inflow of e.g. 31 ether is tracked as a return of principal (despite being slashed in this example). + +:::danger + +Worst-case mass slashings can theoretically exceed 16 ether; if this were to occur, the returned principal would be misclassified as a reward, and distributed to the wrong address. This risk is the drawback that makes this contract variant 'optimistic'. If you intend to use this contract type, **it is important you fully understand and accept this risk**. + +The alternative is to use a splits.org [waterfall contract](https://docs.splits.org/core/waterfall), which won't allow the claiming of rewards until all principal ether has been returned, meaning validators need to be exited for operators to claim their CL rewards. + +::: + +This contract fits both design goals and can be used with thousands of validators. It is safe to deploy an Optimistic Withdrawal Recipient with a principal higher than you actually end up using, though you should process the accrued rewards before exiting a validator or the reward recipients will be short-changed as that balance may be counted as principal instead of reward the next time the contract is updated. If you activate more validators than you specified in your contract deployment, you will record too much ether as reward and will overpay your reward address with ether that was principal ether, not earned ether. Current iterations of this contract are not designed for editing the amount of principal set. + +#### OWR Factory Deployment + +The OptimisticWithdrawalRecipient contract is deployed via a [factory contract](https://github.com/ObolNetwork/obol-splits/blob/main/src/owr/OptimisticWithdrawalRecipientFactory.sol). The factory is deployed at the following addresses on the following chains. 
+ +| Chain | Address | +|---------|-------------------------------------------------------------------------------------------------------------------------------| +| Mainnet | [0x119acd7844cbdd5fc09b1c6a4408f490c8f7f522](https://etherscan.io/address/0x119acd7844cbdd5fc09b1c6a4408f490c8f7f522) | +| Goerli | [0xe9557FCC055c89515AE9F3A4B1238575Fcd80c26](https://goerli.etherscan.io/address/0xe9557FCC055c89515AE9F3A4B1238575Fcd80c26) | +| Holesky | [0x7fec4add6b5ee2b6c1cba232bc6db754794cb6df](https://holesky.etherscan.io/address/0x7fec4add6b5ee2b6c1cba232bc6db754794cb6df) | +| Sepolia | [0xca78f8fda7ec13ae246e4d4cd38b9ce25a12e64a](https://sepolia.etherscan.io/address/0xca78f8fda7ec13ae246e4d4cd38b9ce25a12e64a) | + +### Exitable Withdrawal Recipient + +A much awaited feature for proof of stake Ethereum is the ability to trigger the exit of a validator with only the withdrawal address. This is tracked in [EIP-7002](https://eips.ethereum.org/EIPS/eip-7002). Support for this feature will be inheritable in all other withdrawal recipient contracts. This will mitigate the risk to a principal provider of funds being stuck, or a validator being irrecoverably offline. + +## Split Contracts + +A split, or splitter, is a set of contracts that can divide ether or an ERC20 across a number of addresses. Splits are often used in conjunction with withdrawal recipients. Execution Layer rewards for a DV are directed to a split address through the use of a `fee recipient` address. Splits can be either immutable, or mutable by way of an admin address capable of updating them. + +Further information about splits can be found on the splits.org team's [docs site](https://docs.splits.org/). The addresses of their deployments can be found [here](https://docs.splits.org/core/split#addresses). + +## Split Controllers + +Splits can be completely edited through the use of the `controller` address, however, total editability of a split is not always wanted. 
A permissive controller and a restrictive controller are given as examples below. + +### (Gnosis) SAFE wallet + +A [SAFE](https://safe.global/) is a common method to administrate a mutable split. The most well-known deployment of this pattern is the [protocol guild](https://protocol-guild.readthedocs.io/en/latest/3-smart-contract.html). The SAFE can arbitrarily update the split to any set of addresses with any valid set of percentages. + +### Immutable Split Controller + +This is a [contract](https://github.com/ObolNetwork/obol-splits/blob/main/src/controllers/ImmutableSplitController.sol) that updates one split configuration with another, exactly once. Only a permissioned address can trigger the change. This contract is suitable for changing a split at an unknown point in future to a configuration pre-defined at deployment. + +The Immutable Split Controller [factory contract](https://github.com/ObolNetwork/obol-splits/blob/main/src/controllers/ImmutableSplitControllerFactory.sol) can be found at the following addresses: + +| Chain | Address | +|---------|-------------------------------------------------------------------------------------------------------------------------------| +| Mainnet | [0x49e7cA187F1E94d9A0d1DFBd6CCCd69Ca17F56a4](https://etherscan.io/address/0x49e7cA187F1E94d9A0d1DFBd6CCCd69Ca17F56a4)| +| Goerli | [0x64a2c4A50B1f46c3e2bF753CFe270ceB18b5e18f](https://goerli.etherscan.io/address/0x64a2c4A50B1f46c3e2bF753CFe270ceB18b5e18f) | +| Holesky | | +| Sepolia | | diff --git a/versioned_docs/version-v1.2.0/learn/intro/obol-vs-others.md b/versioned_docs/version-v1.2.0/learn/intro/obol-vs-others.md new file mode 100644 index 0000000000..f9dde57760 --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/intro/obol-vs-others.md @@ -0,0 +1,60 @@ +--- +sidebar_position: 2 +description: Some of the key terms in the field of Distributed Validator Technology +--- + +# Obol vs Other DV Implementations + +This page outlines the unique features of Obol's DV 
implementation, contrasting with other DV implementations. We built Obol’s DVT as a middleware to keep Ethereum secure, resilient, and composable. See also the blog article [Why We Built Charon as a Middleware](https://blog.obol.org/why-we-built-charon-as-a-middleware/). + +![Obol vs others table](/img/ObolvsOthers.png) + +## No private keys put on chain + +Obol's distributed key generation (DKG) event generates key shares for each node within the DV cluster. The entire validator key NEVER exists in one place. Keys are generated locally on the nodes, and can be backed up. The private keys of Obol DVs are NEVER uploaded to the internet or published on-chain. + +An alternative approach to doing this is to split it into shares, encrypt each share with the public key of a node operator, and publish the encrypted private key on chain. The operators’ node key could then decrypt the validator private key. In our opinion, this is not secure. We believe that the safest approach is to avoid the existence of a singular private key, and certainly never to post any private key to a public blockchain network. + +## Cluster independence: Clusters can upgrade independently + +In an Obol DV cluster, nodes use LibP2P to communicate directly with each other, and communications are end-to-end encrypted with TLS. Clusters are independent from one another, can run different versions of Charon, and don't need to upgrade together. This means that when a new version of Obol’s Charon is released, Obol DV clusters can upgrade on their own time, individually from other DV clusters. Charon will NEVER require a hard fork or simultaneous updates across clusters for any upgrades. + +![Cluster Independence](/img/ClusterIndependence.png) + +## Cluster independence: No reliance on a common P2P gossip network + +In an Obol DV cluster, nodes use LibP2P to communicate directly with each other, and communications are end-to-end encrypted with TLS. 
This direct communication of nodes within a cluster improves latency, and makes cluster communications harder to attack with a denial of service (DOS) attack. It also allows an Obol DV cluster to be run within a private network. This may allow cost savings on data egress costs, for operators running cluster nodes across multiple locations of a single cloud provider, for example. + +![Gossip Network](/img/GossipNetwork.png) + +## Works with existing validator clients and PKI + +We built Obol’s DV implementation as a secure and trust-minimised middleware architecture. Our middleware client, Charon, doesn’t replace anything in the client stack, instead it sits between the consensus and validator clients. Node operators integrating the Charon DVT middleware into their stack can continue to use the same clients and private key infrastructure as before, albeit with a different key generation method. + +The alternative approach to DV design is to replace the validator client with a DV-native client, which has custody of the private keys and the capability to sign arbitrary data. However, in our opinion a full validator client capable of signing and exfiltrating arbitrary data without the oversight of a second software implementation has much higher risk of causing correlated slashing. + +![Standard VC](/img/StandardVC.png) + +This gives the benefit of having both Charon and the existing validator client as failsafes, greatly reducing the odds of unintended slashing. Even in the worst case scenario where Charon is compromised by a supply chain attack or a remote code execution attack, or the Obol team become bad actors and push a malicious release, Charon cannot do a lot of damage as a middleware. If a compromised Charon client proposes a potential double vote or surround vote for a validator to sign, the validator client will check its anti-slashing database, see that it has already signed something conflicting, and simply refuse to return a signature. 
Charon could propose that a validator should sign an invalid block, but the chain would reject this and simply consider the proposal missed - a much better outcome than slashing. + +## No non-ETH token risk + +Obol makes no changes to Ethereum’s standard bonding and reward mechanism, and does not require nodes to post any bonds additional to the 32 ETH required for a validator. To pay out rewards to operators, splitter contracts like [Obol Splits](../intro/obol-splits.mdx) can be used to withdraw and share rewards on a continuous basis. This allows products like liquid staking protocols to be built on top of Obol, implementing a bond or unique token into their protocol, should they choose to do so. + +![ETH only](/img/ETHonly.png) + +The alternative approach is to create a token and require stakers to pay operators in that token. This would require stakers to keep a balance of the network token ready for fee paying, in order to continue using the staking service. This mechanism would be informed by oracles, which decide when to post rewards and punish operators. This alternative model has some drawbacks. Namely, the varying price of the network’s unique token will change relative to the price of ETH: operators are not able to determine their commission as a percentage of ETH staked, and stakers likewise must consider the additional initial cost of purchasing the token to determine their long-term rate of return on their staked ETH. + +![ETH and Fee Token](/img/ETHandFee.png) + + +## Non-custodial reward splits + +(see also the [docs page on Splits](../../learn/intro/obol-splits.mdx), and the [Splits.org blog article](https://splits.org/blog/obol-ethereum-resilience/).) + +To pay out rewards to operators, splitter contracts like Obol Splits can be used to withdraw and share rewards on a continuous basis. Two key goals of validator reward management are: + 1. 
To be able to differentiate reward ether from principal ether such that node operators can be paid a percentage of the *reward* they accrue for the principal provider, rather than a percentage of *principal and reward*. + 2. To be able to withdraw the rewards in an ongoing manner without exiting the validator. +This allows products like liquid staking protocols to be built on top of Obol, implementing a bond or unique token into their protocol, should they choose to do so. + +![Splits OWR](/img/ObolSplits.png) diff --git a/versioned_docs/version-v1.2.0/learn/intro/staking-stack.md b/versioned_docs/version-v1.2.0/learn/intro/staking-stack.md new file mode 100644 index 0000000000..78b72472b5 --- /dev/null +++ b/versioned_docs/version-v1.2.0/learn/intro/staking-stack.md @@ -0,0 +1,33 @@ +--- +sidebar_position: 3 +description: The Obol Modular Staking Stack +--- + +# Obol's Modular Staking Stack + +## The Components + +- [Charon](../charon/intro.md) - Obol's DV Middleware client. +- [Obol Splits](../intro/obol-splits.mdx) - A suite of smart contracts for use with distributed validators. +- [DV Launchpad](../intro/launchpad.md) - A website interface for configuring and activating distributed validators. +- [Obol SDK](../../adv/advanced/quickstart-sdk.mdx) - for creating Distributed Validators with the help of the Obol API. + +![Obol Stacking Stack](/img/StakingStack.png) + +## The Vision + +The road to decentralizing stake is a long one. At Obol we have divided our vision into two key versions of distributed validators. + +### V1 - Trusted Distributed Validators + +![Multi Operator DV Cluster](/img/MultiOperator7.png) + +The first version of distributed validators will have dispute resolution out of band. Meaning you need to know and communicate with your other operators if there is an issue with your shared cluster. + +A DV without in-band dispute resolution/incentivisation is still extremely valuable. 
Individuals and staking-as-a-service providers can deploy DVs on their own to make their validators fault tolerant. Groups can run DVs together, but need to bring their own dispute resolution to the table, whether that be a smart contract of their own, a traditional legal service agreement, or simply high trust between the group. + +### V2 - Trustless Distributed Validators + +As described in our [roadmap blog article](https://blog.obol.org/roadmap-the-distributed-validator-protocol/) published in February 2024, Version 2 of Charon will layer in a (dis)incentivisation scheme to solve the “lazy operator” problem, whereby an offline operator within a DV cluster does not earn any rewards. Further incentivisation alignment can be achieved with operator bonding requirements that can be slashed for unacceptable performance. + +To add an un-gameable incentivisation layer to threshold validation requires complex interactive cryptography schemes, secure off-chain dispute resolution, and EVM support for proofs of the consensus layer state; as a result, this will be a longer and more complex undertaking than V1, hence the delineation between the two. Some of the published R&D material is available in the [further reading](https://docs.obol.org/next/fr/resources#research-and-development) section of the docs. 
\ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/run/_category_.json b/versioned_docs/version-v1.2.0/run/_category_.json new file mode 100644 index 0000000000..a92ca5ffd7 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/_category_.json @@ -0,0 +1,7 @@ +{ + "label": "RUN A DV", + "position": 2, + "collapsed": false, + "collapsible": false, + "className": "menuSection" +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/run/integrations/Dappnode.mdx b/versioned_docs/version-v1.2.0/run/integrations/Dappnode.mdx new file mode 100644 index 0000000000..b64053b4f9 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/integrations/Dappnode.mdx @@ -0,0 +1,32 @@ +--- +sidebar_position: 3 +description: Dappnode Frequently asked questions +--- + +# Dappnode + +# Dappnode + +## For setup, see quickstart guide: + +For setup of a DV using Dappnode, see the quickstart guide [Create a DV Alone](../start/quickstart_alone.mdx), and select the appropriate tab for "Dappnode". + +## Frequently asked questions + +### If an operator uses an ENR to join a cluster, then exits the validator key, do they need to clean up the validator and Charon volumes to use the same ENR for another cluster? + +Yes, they need to clean up the Charon and validator volumes. However, instead of deleting everything, the operator can: + +1. Download a backup (keep a copy just in case). +2. Edit the backup, keeping only the necessary files from the specific cluster (see image below) +necessary files +3. Recompress the edited backup and upload it again after removing the Charon and validator volumes. + +### Does an operator need to use the `VALIDATOR_EXTRA_OPTS` to pass the `builderonly` or `builderalways` flag for Lodestar VC? + +No, if `ENABLE_MEV_BOOST` is set to `true`, these flags will be added automatically. 
+flags will be added + +### How can users running two clusters (e.g., one for EtherFi solo stakers and another for Techne) on the same Dappnode machine push monitoring data from both clusters to Obol? + +In the Config tab, there is a field called "Charons to monitor by Obol (optional)". You just need to enter the cluster numbers you are using in Dappnode. For example, if you’re running three nodes on clusters 1, 2, and 3, you would enter “1,2,3”. \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/run/integrations/_category_.json b/versioned_docs/version-v1.2.0/run/integrations/_category_.json new file mode 100644 index 0000000000..4ef344ed5e --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/integrations/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Partner Integrations", + "position": 4, + "collapsed": true +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/run/integrations/lido-csm.md b/versioned_docs/version-v1.2.0/run/integrations/lido-csm.md new file mode 100644 index 0000000000..a1b4f0e685 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/integrations/lido-csm.md @@ -0,0 +1,232 @@ +--- +sidebar_position: 2 +description: Setup and run a DV within the Lido Community Staking Module +--- +# Create a Lido CSM DV + +This is a guide on taking part in Lido's [Community Staking Module](https://operatorportal.lido.fi/modules/community-staking-module) (CSM) with a squad as part of a [Distributed Validator Cluster](../../learn/intro/key-concepts.md#distributed-validator-cluster). + +To start, this guide makes a couple assumptions: + +1. You're running a Linux distribution and you've installed [Git](https://git-scm.com/downloads) and [Docker](https://docs.docker.com/engine/install/) (as a [non-root user](https://docs.docker.com/engine/install/linux-postinstall/#manage-docker-as-a-non-root-user)). +2. You will be deploying on Ethereum mainnet. 
Some screenshots in this guide are from Holesky just for demonstration purposes, so please verify you are using the correct [mainnet addresses](https://operatorportal.lido.fi/modules/community-staking-module#block-d8e94f551b2e47029a54e6cedea914a7). + +## Getting started + +This guide is broken down into 3 parts: + +Part 1: Creating a shared [SAFE](https://safe.global/) wallet for the cluster, and a [Splits.org](https://splits.org) reward splitting contract + +Part 2: Using the [Obol DV Launchpad](https://launchpad.obol.org/) + CLI to create the cluster + +Part 3: Deploying the validator to Lido's CSM using their UI. + +:::info +In this guide we'll be using CSM UI in advanced mode, using the `extendedManagerPermissions` to set the `managerAddress` to the cluster multi-sig (SAFE) and the `rewardAddress` to the Splits.org splitting contract. +::: + +## Part 1: Creating the Cluster SAFE + Splitter Contract +### Deploy the SAFE + +Detailed instructions on how to create a SAFE Wallet can be found [here](https://help.safe.global/en/articles/40868-creating-a-safe-on-a-web-browser). + +The squad leader should obtain signing addresses for all the cluster members, to create a new SAFE with the operators all as owners. + +![SAFE UI Login Screen](/img/CSM_walkthrough1.png) + +After giving the Safe a name and selecting the appropriate network, continue by clicking the **Next** button. + +![Create SAFE UI](/img/CSM_walkthrough2.png) + +Add all the signer addresses of the cluster members, select a threshold, and proceed to the final step by clicking the **Next** button. + +:::tip +Don't require 100% of signers to approve transactions, in case someone loses access to their address. Using the same [threshold](../../learn/intro/key-concepts.md#distributed-validator-threshold) as your cluster will use is a reasonable starting point. +::: + +![Setting SAFE Threshold](/img/CSM_walkthrough3.png) + +Finally, submit the transaction to create the Safe by clicking on the **Create** button. 
+ +![Creating the SAFE](/img/CSM_walkthrough4.png) + +### Deploy the Splitter Contract + +The squad leader should obtain the reward addresses from all the cluster members (if members want to use a distinct address to the one they sign with for receiving rewards). Open https://app.splits.org and create a `New contract`. Make sure to select the appropriate network. + +![Choosing Network](/img/CSM_walkthrough5.png) + +Select `Split` for the contract type. + +![Selecting Split Contract](/img/CSM_walkthrough6.png) + +Add the reward addresses of all cluster members. Choose whether the contract is immutable (recommended option), whether to sponsor the maintainers of [splits.org](https://splits.org), and optionally whether to set a distribution bounty such that third parties could pay the gas costs of distributing the accrued rewards in exchange for a small fee. + +:::tip +If your cluster would like to contribute a portion of its rewards to Obol’s '[1% for Decentralisation](https://blog.obol.org/1-percent-for-decentralisation/)' Retroactive Fund, thereby earning [Obol Contributions](https://obol.org/contributions) as part of Lido's [integration of CSM](https://research.lido.fi/t/integrate-csm-into-the-decentralized-validator-vault/8621) into the DV Vault, you must add [retroactivefunding.obol.eth](https://etherscan.io/address/0xDe5aE4De36c966747Ea7DF13BD9589642e2B1D0d) as a recipient of 0.1% of the splitter contract. This will contribute 0.1% of rewards **and your CSM bond** to Obol's RAF. Future versions of CSM integrations will enable contributing exactly 1% of accruing CSM rewards. +::: + +![Adding recipients](/img/CSM_walkthrough7.png) + +Finally, click the **Create Split** button, execute the transaction and share the created split contract with all cluster members for review. 
+ + +## Part 2: Use the DV Launchpad + CLI to create the cluster keys + +`Charon` is the middleware client that enables validators to be run by a group of independent node operators - a cluster or squad. A complete multi-container `Docker` setup including execution client, consensus client, validator client, MEV-Boost, the `Charon` client and monitoring tools can be found in [this repository](https://github.com/ObolNetwork/charon-distributed-validator-node). + + +### Step 1: Clone the repo + +```sh +git clone https://github.com/ObolNetwork/charon-distributed-validator-node.git +``` + +### Step 2: Create ENR and Backup your Private Key + +Enter the CDVN directory: + +```sh +cd charon-distributed-validator-node +``` + +Use docker to create an ENR + +```sh +docker run --rm -v "$(pwd):/opt/charon" obolnetwork/charon:v1.2.0 create enr +``` + +### Back up the private key located in `.charon/charon-enr-private-key` + +![Creating a charon ENR key pair](/img/CSM_walkthrough8.png) + +:::caution +What you see in the console starting with `enr:-` is the **public key** for your Charon node (known as an ENR). The **private key** is in the file `.charon/charon-enr-private-key`, be sure to back it up securely. +::: + + +### Step 3: Create the DV cluster configuration using the Launchpad + +Obol has integrated CSM details into the DV Launchpad. Choosing the "Lido CSM" withdrawal configuration allows you to create up to 12 validator keys (CSM's Early Access limit) with Lido's required withdrawal and fee recipient addresses. + +To start, the squad leader opens the [DV Launchpad](https://launchpad.obol.org), then connects their wallet and chooses **Create a cluster with a group**. + +![DV Launchpad Home Page](/img/CSM_walkthrough9.png) + +Then click **Get Started**. + +![Creation Summary Screen](/img/CSM_walkthrough10.png) + +Accept all the necessary advisories and sign to confirm. 
+ +![Obol Warnings, Terms, and Advisories](/img/CSM_walkthrough11.png) + +Cluster configuration begins next. First, select the cluster name and size, then enter all cluster members' signer addresses. + +![Configure Cluster Page](/img/CSM_walkthrough12.png) + +- Select the number of validators to create (CSM's Early Access phase is capped at a maximum 12 validators). +- (If the cluster creator is taking part in the cluster) Enter your Charon node's ENR which was generated during [step 2](#step-2-create-enr-and-backup-your-private-key) above. +- In the **Withdrawal Configuration** field, select `LIDO CSM`. This will automatically fill the required Withdrawal Address and Fee Recipient Address per [Lido's Documentation](https://operatorportal.lido.fi/modules/community-staking-module#block-d8e94f551b2e47029a54e6cedea914a7). +- Finally, click on the **Create Cluster Configuration** button. + +![Further Cluster Configuration](/img/CSM_walkthrough13.png) + +Lastly, share the cluster invite link with the other cluster members. + +![Cluster Invitation Page](/img/CSM_walkthrough14.png) + + +### Step 4: Distributed Key Generation (DKG) + +All squad members need to open the cluster invitation link, connect their wallet, accept all necessary advisories, and to verify the cluster configuration is correct with a signature. Each squad member will also need to upload and sign an ENR to represent their charon client, so see [steps 1](#step-1-clone-the-repo) and [2](#step-2-create-enr-and-backup-your-private-key) above. + +![Cluster Invitation Acceptance Page](/img/CSM_walkthrough15.png) + +Once all members confirm the configuration they will see the **Continue** button. + +![Prepare for DKG Page](/img/CSM_walkthrough16.png) + +On the next page, they will find a CLI command which is used to begin the Distributed Key Generation (DKG) ceremony. All members need to synchronously complete this step. 
+ +![DKG Command to Run](/img/CSM_walkthrough17.png) + +:::tip +Go back to the terminal and make sure you're in the `charon-distributed-validator-node` directory before running the DKG command: + +```sh +pwd +``` + +If you are not, navigate to it using the `cd` command. +::: + +Paste the DKG command into your terminal and wait for all the other squad members to connect and complete the DKG ceremony. + +![Completed DKG CLI Output](/img/CSM_walkthrough18.png) + +New files were generated: `cluster-lock.json`, `deposit-data.json`, `validator_keys` are all found in the `.charon` folder (hidden by default). This contains each operator's partial key signatures for the validators. + +:::danger +At this point, **each operator must make a backup of the `.charon` folder and keep it safe, as validator keys cannot be recreated if lost**. +::: + +### Step 5: Create a `.env` file for Mainnet + +Copy and rename the `.env.sample.mainnet` file to `.env` + +```sh +cp .env.sample.mainnet .env +``` + +Open the `.env` file using your favourite editor: + +```sh +nano .env +``` + +Uncomment and set `BUILDER_API_ENABLED=true`. + +Uncomment `MEVBOOST_RELAYS=` and set it to the URL of at least one of Lido's approved MEV relays [here](https://enchanted-direction-844.notion.site/6d369eb33f664487800b0dedfe32171e?v=8e5d1f1276b0493caea8a2aa1517ed65). Multiple relays must be separated by a comma. Consult our [deployment best practices](../prepare/deployment-best-practices.md#mev-boost-relays) for further info on MEV relay selection. + + +### Step 6: Starting the Node + +Each cluster member should start the node with the following command: + +```sh +docker compose up -d +``` + +At this point, execution and consensus clients should start syncing. Charon and the validator client should start waiting for the consensus client to be synced and the validator to be activated. 
+ + +## Part 3: Upload the public keys and deposit to Lido CSM + +CSM is launching with a whitelisted set of approved operators (Early Access). The squad member with EA should be the one to create the node through the CSM widget. + +The EA member will head to [CSM Extended Mode](https://csm.lido.fi/?mode=extended) and connect their wallet. (Note the `mode=extended` parameter.) This allows the Lido CSM reward address to be set to the split contract created earlier. + +![CSM Connect Wallet Modal](/img/CSM_walkthrough19.png) + +The EA member clicks on the **Create Node Operator** button. + +![CSM Create Node Operator Modal](/img/CSM_walkthrough20.png) + +- The EA member pastes the contents of the `deposit-data.json` file into the `Upload deposit data` field. The EA member should have enough ETH/stETH/wstETH to cover the bond. + +- Expand the **Specify custom addresses** section. + + - Set the **Reward Address** field to the `Split` contract address and the **Manager Address** field to the `Safe` wallet address. (These were created previously in [part 1](#part-1-creating-the-cluster-safe--splitter-contract)) + + - Verify that the **Extended** box is outlined. This ensures that the `Safe` address has the ability to change the reward address if necessary. + +- Check that the correct addresses are set and click the **Create Node Operator** button. + +![CSM Extended Mode Settings](/img/CSM_walkthrough21.png) + +Sign the transaction. The cluster is ready for deposit from Lido CSM. At this point, your job is finished. + +:::warning +When claiming your cluster's rewards, **be sure to claim in wstETH**. Claiming native ETH will result in loss of funds. Rebasing tokens like stETH may not receive the incremental yield you’re expecting. More information can be found in the [splits.org documentation](https://docs.splits.org/core/split#how-it-works). 
+::: diff --git a/versioned_docs/version-v1.2.0/run/integrations/quickstart-eigenpod.mdx b/versioned_docs/version-v1.2.0/run/integrations/quickstart-eigenpod.mdx new file mode 100644 index 0000000000..64f825b7a4 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/integrations/quickstart-eigenpod.mdx @@ -0,0 +1,66 @@ +--- +sidebar_position: 1 +description: Create an EigenLayer Distributed Validator to enable distributed restaking. +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Create an EigenLayer DV + +:::warning +The Obol-SDK is in a beta state and should be used with caution. Ensure you validate all important data. +::: + +This is a walkthrough of creating a distributed validator cluster pointing to an [EigenLayer](https://eigenlayer.xyz/) [EigenPod](https://docs.eigenlayer.xyz/eigenlayer/restaking-guides/restaking-user-guide/native-restaking/create-eigenpod-and-set-withdrawal-credentials/), using the [DV Launchpad](../../learn/intro/launchpad.md) and other applications. + +## Pre-requisites + +- The Ethereum addresses or ENS names for the node operators in the cluster. (Currently the DV Launchpad only supports Metamask or equivalent injected web3 browser wallets.) +- If creating more than one validator, the ability to use the [obol-sdk](../../adv/advanced/quickstart-sdk.mdx) is required. + +## Create a SAFE to own the EigenPod + +Deploy a [SAFE](https://app.safe.global/) with the addresses of the node operators as signers. A reasonable signing threshold is the same as a cluster (>2/3rds) but use good judgement if a different threshold or signer set suits your use case. The principal ether for these validators will be returned to this address. + +## Create an EigenPod + +Select the "Create EigenPod" option on the [EigenLayer App](https://app.eigenlayer.xyz/)'s 'Restake' page, using the created SAFE account via WalletConnect. Note the EigenPod's address. 
+ +## Create a Splitter for the block reward + +Create a Splitter on [splits.org](https://app.splits.org/), to divide the block reward and MEV amongst the operators. Note the split's address. + +:::tip +To be recognised as a part of Obol's [1% for Decentralisation](https://blog.obol.tech/1-percent-for-decentralisation/) campaign, you must contribute 3% of execution layer rewards by setting [this address](https://etherscan.io/address/0xDe5aE4De36c966747Ea7DF13BD9589642e2B1D0d) as a recipient on your split. Upcoming Obol EigenPods will support contributing 1% of total rewards instead of 3% of only execution rewards. +::: + +## Create the DV cluster invite + +With these contracts deployed, you can now create the DV cluster invitation to send to Node Operators, this can be done through the DV Launchpad or the Obol SDK. + + + +
    +
  • Use the "Create a cluster with a group" flow on the DV Launchpad.
  • +
  • Choose a cluster name and invite your operator's addresses.
  • +
  • When setting the withdrawal credentials, select "Custom".
  • +
  • For "Withdrawal Address", set the EigenPod contract address.
  • +
  • For "Fee Recipient", set the Split contract address.
  • +
  • Continue the process of creating a cluster normally, share the invitation link with the operators and have them complete the Distributed Key Generation ceremony.
  • +
+
+ +
    +
  • If you are creating a cluster with more than one validator, you will need to craft the cluster invitation with the SDK.
  • +
  • Follow the Create a cluster using the SDK guide.
  • +
  • For withdrawal_address, set the EigenPod contract address.
  • +
  • For fee_recipient_address, set the Split contract address.
  • +
  • Continue the process of creating the cluster as per the guide, share the invitation link with the operators and have them complete the Distributed Key Generation ceremony.
  • +
+
+
+ +## Deposit and restake your Distributed Validator + +Once you have completed the DKG ceremony, you can continue the flow on the EigenLayer app to activate these validators and restake them. Consult the EigenLayer [documentation](https://docs.eigenlayer.xyz/eigenlayer/restaking-guides/restaking-user-guide/native-restaking/create-eigenpod-and-set-withdrawal-credentials/enable-restaking) to continue the process. diff --git a/versioned_docs/version-v1.2.0/run/prepare/_category_.json b/versioned_docs/version-v1.2.0/run/prepare/_category_.json new file mode 100644 index 0000000000..6d4ce7e1b0 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/prepare/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Prepare to run a DV", + "position": 2, + "collapsed": true +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/run/prepare/deployment-best-practices.md b/versioned_docs/version-v1.2.0/run/prepare/deployment-best-practices.md new file mode 100644 index 0000000000..fc998c791d --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/prepare/deployment-best-practices.md @@ -0,0 +1,101 @@ +--- +sidebar_position: 2 +description: DV Deployment best practices, for running an optimal Distributed Validator setup at scale. +--- + +# Deployment Best Practices + +The following are a selection of best practices for deploying Distributed Validator Clusters at scale on mainnet. + + +## Hardware Specifications + +The following specifications are recommended for bare metal machines for clusters intending to run a significant number of mainnet validators: + +### Minimum Specs + +- A CPU with 4+ cores, favouring high clock speed over more cores. 
( >3.0GHz and higher or a cpubenchmark [single thread](https://www.cpubenchmark.net/singleThread.html) score of >2,500) +- 16GB of RAM +- 2TB+ free SSD disk space (for mainnet) +- 1000 read/write SSD IOPS +- 500MB/s read/write SSD speed +- 10mb/s internet bandwidth + +### Recommended Specs for extremely large clusters + +- A CPU with 8+ physical cores, with clock speeds >3.5Ghz +- 32GB+ RAM (depending on the EL+CL clients) +- 4TB+ NVMe storage +- 2000 read/write SSD IOPS +- 1000MB/s read/write SSD speed +- 25mb/s internet bandwidth + +An NVMe storage device is **highly recommended for optimal performance**, offering nearly 10x more random read/writes per second than a standard SSD. + +Inadequate hardware (low-performance virtualized servers and/or slow HDD storage) has been observed to hinder performance, indicating the necessity of provisioning adequate resources. **CPU clock speed and Disk throughput+latency are the most important factors for running a performant validator.** + +Note that the Charon client itself takes less than 1GB of RAM and minimal CPU load. In order to optimize both performance and cost-effectiveness, it is recommended to prioritize physical over virtualized setups. Such configurations typically offer greater performance and minimize overhead associated with virtualization, contributing to improved efficiency and reliability. + +When constructing a DV cluster, it is important to be conscious of whether a cluster runs across cloud providers or stays within a single provider's private networking. This likely can impact the bandwidth and latency of the connections between nodes, as well as the egress costs of the cluster (Charon has a relatively low communication with its peers, averaging 10s of kb/s in large mainnet clusters). Ideally, bare metal machines in different locations within the same continent and with at least two providers, balances redundancy and performance. 
+ +## Intra-cluster Latency + +It is recommended to **keep peer ping latency below 235 milliseconds for all peers in a cluster**. Charon should report a consensus duration averaging under 1 second through its prometheus metric `core_consensus_duration_seconds_bucket` and associated grafana panel titled "Consensus Duration". + +In cases where latencies exceed these thresholds, efforts should be made to reduce the physical distance between nodes or optimize Internet Service Provider (ISP) settings accordingly. Ensure all nodes are connecting to one another directly rather than through a relay. + +For high-scale, performance deployments; inter-peer latency of < 25ms is optimal, along with an average consensus duration under 100ms. + +## Node Locations + +For optimal performance and high availability, it is recommended to provision machines or virtual machines (VMs) within the same continent. This practice helps minimize potential latency issues ensuring efficient communication and responsiveness. Consider maps of [undersea internet cables](https://www.submarinecablemap.com/) when selecting locations across oceans with low latency. + +## Peer Connections + +Charon clients can establish connections with one another in two ways: either through a third publicly accessible server known as [a relay](../../learn/charon/charon-cli-reference.md#host-a-relay) or directly with one another if they can establish a connection. The former is known as a relay connection and the latter is known as a direct connection. + +It is important that all nodes in a cluster be directly connected to one another - this can halve the latency between them and reduces bandwidth constraints significantly. Opening Charon’s p2p port (the default is `3610`) to the Internet, or configuring your routers NAT gateway to permit connections to your Charon client, are what are required to facilitate a direct connection between clients. 
+ +## Instance Independence + +Each node in the cluster should have its own independent beacon node (EL+CL) and validator client as well as Charon client. Sharing beacon nodes between the different nodes would potentially impact the fault tolerance of the cluster and as a result should be avoided. + +## Placement of Charon clients + +If you wish to divide a Distributed Validator node across multiple physical or virtual machines; locate the Charon client on the EL/CL machine instead of the VC machine. This setup reduces latency from Charon to the consensus layer, as well as keeping the public-internet connected clients separate from the clients that hold the validator private keys. Be sure to use encrypted communication between your VC and the Charon client, potentially through a cloud-provided network, a self-managed network tunnel, a VPN, a Kubernetes [CNI](https://kubernetes.io/docs/concepts/extend-kubernetes/compute-storage-net/network-plugins/), or other manner. + +## Node Configuration + +Cluster sizes that allow for Byzantine Fault Tolerance are recommended as they are safer than clusters with simply Crash Fault Tolerance (See this guide for reference - [Cluster Size and Resilience](../../learn/charon/cluster-configuration#cluster-size-and-resilience)). + +## MEV-Boost Relays + +MEV relays are configured at the Consensus Layer or MEV-boost client level. Refer to our [guide](../../run/start/quickstart-builder-api.mdx) to ensure all necessary configuration has been applied to your clients. As with all validators, low latency during proposal opportunities is extremely important. By default, MEV-Boost waits for all configured relays to return a bid, or will timeout if any have not returned a bid within 950ms. This default timeout is generally too slow for a distributed cluster (think of this time as additive to the time it takes the cluster to come to consensus, both of which need to happen within a 2 second window for optimal proposal broadcasting). 
It is likely better to only list relays that are located geographically near your node, so that once all relays respond (e.g. in < 50ms) your cluster will move forward with the proposal. + +Use Charon's [`test mev` command](../../run/prepare/test-command.mdx#test-mev-relay) to test a number of your preferred relays, and select the two or three relays with the lowest latency to your node(s), you do not need to have the same relays on each node in a cluster. + +## Client Diversity + +The clusters should consist of a combination of your preferred consensus, execution, and validator clients. It is recommended to include a combination of multiple clients in order to have a healthy client diversity within the cluster, ideally, if any single client type fails, it should be less than the fault tolerance of the cluster, and the validators should stay online/not do anything slashable. + +Remote signers can be included as well, such as Web3signer or Dirk. A diversity of private key infrastructure setups further reduces the risk of total key compromise. + +Tested client combinations can be found in the [release notes](https://github.com/ObolNetwork/charon/releases) for each Charon version. + +## Metrics Monitoring + +As requested by Obol Labs, node operators can push [standard monitoring](../../run/start/obol-monitoring.md) (Prometheus) and logging (Loki) data to Obol Labs' core team's cloud infrastructure for in-depth analysis of performance data and to assist during potential issues that may arise. Our recommendation for operators is to independently store information on their node health over the course of the validator lifecycle as well as any information on validator performance that they collect during the normal life cycle of a validator. + +## Obol Splits + +Leveraging [Obol Splits](../../learn/intro/obol-splits.mdx) smart contracts allows for non-custodial fund handling and allows for net customer payouts in an ongoing manner. 
Obol Splits ensure no commingling of funds across customers, and maintain full non-custodial integrity. Read more about Obol Splits [here](../../learn/intro/faq.mdx#obol-splits). + +## Deposit Process + +Deposit processes can be done via an automated script. This can be used for DV clusters until they reach the desired number of validators. + +It is important to allow time for the validators to be activated (usually < 24 hours). + +Consider using batching smart contracts to reduce the gas cost of a script, but take caution in their integration not to make an invalid deposit. + + diff --git a/versioned_docs/version-v1.2.0/run/prepare/how_where_DVs.md b/versioned_docs/version-v1.2.0/run/prepare/how_where_DVs.md new file mode 100644 index 0000000000..cdecdde8e1 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/prepare/how_where_DVs.md @@ -0,0 +1,49 @@ +--- +sidebar_position: 1 +description: How and where to run DVs +--- + +# How and Where To Run DVs + +## Launchers and Deployment Tooling + +- [Obol CDVN](https://github.com/ObolNetwork/charon-distributed-validator-node) +- [Obol K8s](https://github.com/ObolNetwork/charon-k8s-distributed-validator-node) +- [Obol Helm Charts](https://github.com/ObolNetwork/helm-charts) +- [Obol Ansible Playbooks](https://github.com/ObolNetwork/obol-ansible) +- [Dappnode](https://docs.dappnode.io/docs/user/staking/ethereum/dvt-technologies/obol-network/) +- [Stereum](https://stereum.net/) +- [Sedge](https://github.com/ObolNetwork/sedge/blob/develop/docs/docs/quickstart/charon.mdx) +- [Terraform Charon Relay](https://github.com/ObolNetwork/terraform-charon-relay) +- [Terraform Grafana Charon dashboards](https://github.com/ObolNetwork/terraform-grafana-dashboards) + +## Quickstart Guides +- [Run a DV alone](../start/quickstart_alone.mdx) +- [Run a DV as a group](../start/quickstart_group.mdx) +- [Run a DV using the SDK](../../adv/advanced/quickstart-sdk.mdx) + +## CL+VC Combinations: + + +**Legend** +- ✅: All duties succeed in testing 
+- 🟡: All duties succeed in testing, except non-penalised aggregation duties +- 🟠: Duties may fail for this combination +- 🔴: One or more duties fails consistently + +| Consensus 👇 Validator 👉 | Teku v24.8.0 | Lighthouse v5.3.0[^lhagg] | Lodestar v1.20.2 | Nimbus v24.7.0 | Prysm [PR](https://github.com/prysmaticlabs/prysm/pull/13995) | Remarks | +|-------------------------|--------------|-------------------|------------------|----------------|---------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------| +| Teku v24.8.0 | 🟡 | 🟡 | ✅ | ✅ | 🟠 | Teku `bn` needs the `--validators-graffiti-client-append-format=DISABLED` flag in order to produce blocks properly. Teku `vc` are only failing aggregation duties 50% of the time, which are not directly penalised but impact network density at high scale.| +| Lighthouse v5.3.0 | 🟡 | 🟡 | ✅ | ✅ | 🟠 | Lighthouse `vc` are only failing aggregation duties, which are not directly penalised but impact network density at high scale. | +| Nimbus v24.7.0 | 🟡 | 🟡 | ✅ | ✅ | ✅ | Nimbus beacon nodes requires that you add the following flag to **charon run**: `charon run --feature-set-enable=json_requests` | +| Prysm v5.0.3 | 🟡 | 🟡 | ✅ | ✅ | ✅ | Prysm `validator` needs a particular [pull request](https://github.com/prysmaticlabs/prysm/pull/13995) merged and released for aggregation duties to succeed. | +| Lodestar v1.20.2 | 🟡 | 🟡 | ✅ | ✅ | 🔴 | | + +[^lhagg]: sync committee and aggregator duties are not yet supported in a DV setup by Lighthouse, all other duties work as expected. + + +### Note: +
  - Blinded beacon block proposals are only supported from cluster lock version v1.7 and charon release v0.17.0 onwards.
  - Prysm VC support is added from prysm version v5.0.0 onwards.
\ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/run/prepare/test-command.mdx b/versioned_docs/version-v1.2.0/run/prepare/test-command.mdx new file mode 100644 index 0000000000..c41b210ba8 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/prepare/test-command.mdx @@ -0,0 +1,668 @@ +--- +sidebar_position: 3 +description: Test the performance of a candidate Distributed Validator Cluster setup. +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Test a Cluster + +Charon test commands are designed to help you evaluate the performance and readiness of your candidate cluster. It allows you to test your connection to other Charon peers, the performance of your beacon node(s), the readiness of your validator client, the performance of the MEV relays you will be using and the infrastructure on which you will run the cluster. It prints a performance report to the standard output (which can be omitted with the `--quiet` flag) and a machine-readable JSON format of the report if the `--output-json` flag is set. + + + +

Test all

+ Intended for running tests across all categories. Each flag should have a prefix for its category (i.e.: the flag `--endpoints` from the beacon tests becomes `--beacon-endpoints`). For details about each category refer to their respective sections. + + + + + Regular tests intended for relatively fast run, without putting any major load on any tested system. + Based on which stage you are with your cluster creation, some steps are easened. + + + +

Pre-requisites

+ + - [ENR private key](../../learn/charon/charon-cli-reference#creating-an-enr-for-charon). + - Peers' ENRs, supplied to the `--peers-enrs` flag. + - Running beacon node(s) towards which tests will be executed, supplied to `--beacon-endpoints` flag. + - Running validator client towards which tests will be executed. + - Running MEV relay(s) towards which tests will be executed, supplied to `--mev-endpoints` flag. + +

Example run

+ + ```shell + charon alpha test all \ + --peers-enrs="enr:-HW4QMno_MB_ID6GFVxoIQAHHVHZZZjzFctxtX2tm9D95tvaPbHathi8YUP8jh8v2YUAVu2fYWEOB_BT14pt8QgiGg2AgmlkgnY0iXNlY3AyNTZrMaECdpnK83s0dbBwCaEfDIkQ-3nJkkC93STvv6Vmi0bYlzg,enr:-HW4QO2vefLueTBEUGly5hkcpL7NWdMKWx7Nuy9f7z6XZInCbFAc0IZj6bsnmj-Wi4ElS6jNa0Mge5Rkc2WGTVemas2AgmlkgnY0iXNlY3AyNTZrMaECR9SmYQ_1HRgJmNxvh_ER2Sxx78HgKKgKaOkCROYwaDY" \ + --beacon-endpoints="https://ethereum-holesky-beacon-api.publicnode.com,https://ethereum-sepolia-beacon-api.publicnode.com" \ + --mev-endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" + ``` +
+ +

Pre-requisites

+ + - Cluster definition file, supplied to the `--peers-definition-file` flag. + - Running beacon node(s) towards which tests will be executed, supplied to `--beacon-endpoints` flag. + - Running validator client towards which tests will be executed. + - Running MEV relay(s) towards which tests will be executed, supplied to `--mev-endpoints` flag. + +

Example run

+ + ```shell + charon alpha test all \ + --peers-definition-file="./.charon/cluster-definition.json" \ + --beacon-endpoints="https://ethereum-holesky-beacon-api.publicnode.com,https://ethereum-sepolia-beacon-api.publicnode.com" \ + --mev-endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" + ``` +
+ +

Pre-requisites

+ + - Cluster lock file, supplied to the `--peers-lock-file` flag. + - Running beacon node(s) towards which tests will be executed, supplied to `--beacon-endpoints` flag. + - Running validator client towards which tests will be executed. + - Running MEV relay(s) towards which tests will be executed, supplied to `--mev-endpoints` flag. + +

Example run

+ + ```shell + charon alpha test all \ + --peers-lock-file="./.charon/cluster-lock.json" \ + --beacon-endpoints="https://ethereum-holesky-beacon-api.publicnode.com,https://ethereum-sepolia-beacon-api.publicnode.com" \ + --mev-endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" + ``` +
+
+ +
+ + + Load tests intended for more time consuming run. Beacon nodes are put under heavy load. MEV relays are required to create real blocks. + Based on which stage you are with your cluster creation, some steps are easened. + + + +

Pre-requisites

 + + - [ENR private key](../../learn/charon/charon-cli-reference#creating-an-enr-for-charon). + - Peers' ENRs, supplied to the `--peers-enrs` flag. + - Running beacon node(s) towards which tests will be executed, supplied to `--beacon-endpoints` flag. It is important that the node is expected to handle a huge load and that it is **not** a publicly accessible one, which can block you. + - Running validator client towards which tests will be executed. + - Running MEV relay(s) towards which tests will be executed, supplied to `--mev-endpoints` flag. + - Running beacon node which will be used for fetching data required by the MEV relay for block creation, supplied to `--mev-beacon-node-endpoint`. There are no restrictions on the node and a public one can be used. + +

Example run

+ + ```shell + charon alpha test all \ + --peers-enrs="enr:-HW4QMno_MB_ID6GFVxoIQAHHVHZZZjzFctxtX2tm9D95tvaPbHathi8YUP8jh8v2YUAVu2fYWEOB_BT14pt8QgiGg2AgmlkgnY0iXNlY3AyNTZrMaECdpnK83s0dbBwCaEfDIkQ-3nJkkC93STvv6Vmi0bYlzg,enr:-HW4QO2vefLueTBEUGly5hkcpL7NWdMKWx7Nuy9f7z6XZInCbFAc0IZj6bsnmj-Wi4ElS6jNa0Mge5Rkc2WGTVemas2AgmlkgnY0iXNlY3AyNTZrMaECR9SmYQ_1HRgJmNxvh_ER2Sxx78HgKKgKaOkCROYwaDY" \ + --beacon-endpoints="http://127.0.0.1:5052/" \ + --beacon-load-test \ + --mev-endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" \ + --mev-beacon-node-endpoint="https://ethereum-beacon-api.publicnode.com" \ + --mev-load-test + ``` + +
+ +

Pre-requisites

 + + - Cluster definition file, supplied to the `--peers-definition-file` flag. + - Running beacon node(s) towards which tests will be executed, supplied to `--beacon-endpoints` flag. It is important that the node is expected to handle a huge load and that it is **not** a publicly accessible one, which can block you. + - Running validator client towards which tests will be executed. + - Running MEV relay(s) towards which tests will be executed, supplied to `--mev-endpoints` flag. + - Running beacon node which will be used for fetching data required by the MEV relay for block creation, supplied to `--mev-beacon-node-endpoint`. There are no restrictions on the node and a public one can be used. + +

Example run

+ + ```shell + charon alpha test all \ + --peers-definition-file="./.charon/cluster-definition.json" \ + --beacon-endpoints="http://127.0.0.1:5052/" \ + --beacon-load-test \ + --mev-endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" \ + --mev-beacon-node-endpoint="https://ethereum-beacon-api.publicnode.com" \ + --mev-load-test + ``` + +
+ +

Pre-requisites

 + + - Cluster lock file, supplied to the `--peers-lock-file` flag. + - Running beacon node(s) towards which tests will be executed, supplied to `--beacon-endpoints` flag. It is important that the node is expected to handle a huge load and that it is **not** a publicly accessible one, which can block you. + - Running validator client towards which tests will be executed. + - Running MEV relay(s) towards which tests will be executed, supplied to `--mev-endpoints` flag. + - Running beacon node which will be used for fetching data required by the MEV relay for block creation, supplied to `--mev-beacon-node-endpoint`. There are no restrictions on the node and a public one can be used. + +

Example run

+ + ```shell + charon alpha test all \ + --peers-lock-file="./.charon/cluster-lock.json" \ + --beacon-endpoints="http://127.0.0.1:5052/" \ + --beacon-load-test \ + --mev-endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" \ + --mev-beacon-node-endpoint="https://ethereum-beacon-api.publicnode.com" \ + --mev-load-test + ``` + +
+
+ +
+ +
+ +

Test connection to peers

 + +Run tests towards other Charon peers to evaluate the effectiveness of a potential cluster setup. The command sets up a libp2p node, similarly to what Charon normally does. This test command **has to be run simultaneously with the other peers**. After the node is up it waits for other peers to get their nodes up and running, retrying the connection every 3 seconds. The libp2p node connects to relays (configurable with `p2p-relays` flag) and to other libp2p nodes via TCP. Other peer nodes are discoverable by using their ENRs. Note that for a peer to be successfully discovered, it needs to be connected to the same relay. After completion of the test suite the libp2p node stays alive (duration configurable with `keep-alive` flag) for other peers to continue testing against it. The node can be forcefully stopped as well. + +To be able to establish a direct connection, you have to ensure: + +- Your machine is publicly accessible on the internet or at least a specific port is. +- You add the `p2p-tcp-address` flag (e.g.: `127.0.0.1:9001`) and the port specified in it is free and publicly accessible. +- You add the flag `p2p-external-ip` (e.g.: `8.8.8.8`) and specify your public IP. + +If all points are satisfied by you and the other peers, you should be able to establish a direct TCP connection between each other. Note that a relay is still required, as it is used for peer discovery. + +Depending on which stage of cluster creation you are at, some steps are simplified. + +

Pre-requisites

+ + - [ENR private key](../../learn/charon/charon-cli-reference#creating-an-enr-for-charon). + - Peers' ENRs, supplied to the `--enrs` flag. + +

Example run

+ + ```shell + charon alpha test peers \ + --enrs="enr:-HW4QMno_MB_ID6GFVxoIQAHHVHZZZjzFctxtX2tm9D95tvaPbHathi8YUP8jh8v2YUAVu2fYWEOB_BT14pt8QgiGg2AgmlkgnY0iXNlY3AyNTZrMaECdpnK83s0dbBwCaEfDIkQ-3nJkkC93STvv6Vmi0bYlzg,enr:-HW4QO2vefLueTBEUGly5hkcpL7NWdMKWx7Nuy9f7z6XZInCbFAc0IZj6bsnmj-Wi4ElS6jNa0Mge5Rkc2WGTVemas2AgmlkgnY0iXNlY3AyNTZrMaECR9SmYQ_1HRgJmNxvh_ER2Sxx78HgKKgKaOkCROYwaDY" + ``` +
+ +

Pre-requisites

+ + - Cluster definition file, supplied to the `--definition-file` flag. + +

Example run

+ + ```shell + charon alpha test peers \ + --definition-file="./.charon/cluster-definition.json" + ``` +
+ +

Pre-requisites

+ + - Cluster lock file, supplied to the `--lock-file` flag. + +

Example run

+ + ```shell + charon alpha test peers \ + --lock-file="./.charon/cluster-lock.json" + ``` +
+ +
+ +

Test beacon node

+ +Run tests on beacon node(s), to evaluate their effectiveness for a Distributed Validator cluster. The beacon node is usually the client doing the most work in a validating stack, especially with a high number of validators being serviced by the validator client(s) and Charon(s) that depend on it. + + + + + Regular tests intended for relatively fast run, without putting any major load on any tested system. + +

Pre-requisites

+ + - Running beacon node(s) towards which tests will be executed, supplied to `--endpoints` flag. + +

Example run

+ + ```shell + charon alpha test beacon \ + --endpoints="https://ethereum-holesky-beacon-api.publicnode.com,https://ethereum-sepolia-beacon-api.publicnode.com" + ``` +
+ + + Load tests intended for more time consuming run. Beacon nodes are put under heavy load. + + These tests include simulated workloads for an increasing number of validators, and the process takes some time (approximately ~33 minutes). It is normal to observe some warnings during the simulations. + + A file with detailed results about simulations done is saved at the current working directory (configurable by `--simulation-file-dir` flag). + +

Pre-requisites

+ + - Running beacon node(s) towards which tests will be executed, supplied to `--endpoints` flag. It is important that the node is expecting to handle huge load and that it is **not** a publicly accessible one, which can block you. + +

Example run

+ + ```shell + charon alpha test beacon \ + --endpoints="http://127.0.0.1:5052/" \ + --load-test + ``` +
+ +
+ +

Test validator client

 + +Run tests towards your validator client, to evaluate its effectiveness for a Distributed Validator cluster. + +The default validator endpoint and port used is `127.0.0.1:3600`. This can be changed by supplying a different endpoint to the `--validator-api-address` flag. + +

Pre-requisites

+ +- Running validator client towards which tests will be executed. + +

Example run

+ + ```shell + charon alpha test validator + ``` + +

Test MEV relay

+ +Run tests towards MEV relays, to evaluate their effectiveness for a Distributed Validator cluster. If MEV-Boost clients are configured for the distributed validator nodes, it is of utmost importance that the relays they connect to are fast and reliable. If not, the chance of missing a block proposal increases significantly. Supplying `--beacon-node-endpoint` and `--load-test` flags allows the test to ask relays for real MEV headers, increasing the accuracy (and duration) of this test. + +At least 1 endpoint is required to be supplied to the `--endpoints` flag. + + + +

Pre-requisites

+ + - Running MEV relay(s) towards which tests will be executed. + +

Example run

+ + ```shell + charon alpha test mev \ + --endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" + ``` + +
+ + +

Pre-requisites

 + + - Running MEV relay(s) towards which tests will be executed. + - Running beacon node which will be used for fetching data required by the MEV relay for block creation, supplied to `--beacon-node-endpoint`. There are no restrictions on the node and a public one can be used. + +

Example run

+ + ```shell + charon alpha test mev \ + --endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" \ + --load-test \ + --beacon-node-endpoint="https://ethereum-beacon-api.publicnode.com" + ``` + +
+ +
+ +

Test machine and network performance

 + +Run tests of your machine and network, to evaluate their effectiveness for a Distributed Validator cluster. Distributed Validators need stable, low-latency internet, a reasonable amount of RAM, and a highly performant disk drive for storage. This test aims to analyse these requirements to give an overview of the system's suitability. + +

Pre-requisites

+ +The storage tests require `fio` to be installed on your machine. Read more about `fio` [here](https://fio.readthedocs.io/en/latest/fio_doc.html). + +

Example run

+ + ```shell + charon alpha test infra + ``` + +
+ + +If you are running Charon using the [charon-distributed-validator-node repository](https://github.com/ObolNetwork/charon-distributed-validator-node/), services like the beacon node and validator client are hosted locally. To run the beacon node and validator client tests, you need to point them toward the correct Docker container, and also include the Docker container’s network. Check your docker networks with `docker network ls`. When you run the test command, specify the Docker network with `--network `. Read more about docker networking [here](https://docs.docker.com/engine/network/). + +

Test all

+ +Intended for running tests across all categories. Each flag should have a prefix for its category (i.e.: the flag `--endpoints` from the beacon tests becomes `--beacon-endpoints`). For details about each category refer to their respective sections. + + + + + Regular tests intended for relatively fast run, without putting any major load on any tested system. + Based on which stage you are with your cluster creation, some steps are easened. + + + +

Pre-requisites

+ + - [ENR private key](../../learn/charon/charon-cli-reference#creating-an-enr-for-charon). + - Peers' ENRs, supplied to the `--peers-enrs` flag. + - Running beacon node(s) towards which tests will be executed, supplied to `--beacon-endpoints` flag. + - Running validator client towards which tests will be executed. + - Running MEV relay(s) towards which tests will be executed, supplied to `--mev-endpoints` flag. + +

Example run

+ ```shell + docker run -u $(id -u):$(id -g) --rm -v "$(pwd):/opt/charon/test" obolnetwork/charon:v1.2.0 alpha test all \ + --peers-enrs="enr:-HW4QMno_MB_ID6GFVxoIQAHHVHZZZjzFctxtX2tm9D95tvaPbHathi8YUP8jh8v2YUAVu2fYWEOB_BT14pt8QgiGg2AgmlkgnY0iXNlY3AyNTZrMaECdpnK83s0dbBwCaEfDIkQ-3nJkkC93STvv6Vmi0bYlzg,enr:-HW4QO2vefLueTBEUGly5hkcpL7NWdMKWx7Nuy9f7z6XZInCbFAc0IZj6bsnmj-Wi4ElS6jNa0Mge5Rkc2WGTVemas2AgmlkgnY0iXNlY3AyNTZrMaECR9SmYQ_1HRgJmNxvh_ER2Sxx78HgKKgKaOkCROYwaDY" \ + --peers-private-key-file="/opt/charon/test/.charon/charon-enr-private-key" \ + --beacon-endpoints="https://ethereum-holesky-beacon-api.publicnode.com,https://ethereum-sepolia-beacon-api.publicnode.com" \ + --mev-endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" \ + --infra-disk-io-test-file-dir="/opt/charon/test" + ``` + +
+ +

Pre-requisites

+ + - Cluster definition file, supplied to the `--peers-definition-file` flag. + - Running beacon node(s) towards which tests will be executed, supplied to `--beacon-endpoints` flag. + - Running validator client towards which tests will be executed. + - Running MEV relay(s) towards which tests will be executed, supplied to `--mev-endpoints` flag. + +

Example run

+ + ```shell + docker run -u $(id -u):$(id -g) --rm -v "$(pwd):/opt/charon/test" obolnetwork/charon:v1.2.0 alpha test all \ + --peers-definition-file="/opt/charon/test/.charon/cluster-definition.json" \ + --peers-private-key-file="/opt/charon/test/.charon/charon-enr-private-key" \ + --beacon-endpoints="https://ethereum-holesky-beacon-api.publicnode.com,https://ethereum-sepolia-beacon-api.publicnode.com" \ + --mev-endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" \ + --infra-disk-io-test-file-dir="/opt/charon/test" + ``` +
+ +

Pre-requisites

+ + - Cluster lock file, supplied to the `--peers-lock-file` flag. + - Running beacon node(s) towards which tests will be executed, supplied to `--beacon-endpoints` flag. + - Running validator client towards which tests will be executed. + - Running MEV relay(s) towards which tests will be executed, supplied to `--mev-endpoints` flag. + +

Example run

+ + ```shell + docker run -u $(id -u):$(id -g) --rm -v "$(pwd):/opt/charon/test" obolnetwork/charon:v1.2.0 alpha test all \ + --peers-lock-file="/opt/charon/test/.charon/cluster-lock.json" \ + --peers-private-key-file="/opt/charon/test/.charon/charon-enr-private-key" \ + --beacon-endpoints="https://ethereum-holesky-beacon-api.publicnode.com,https://ethereum-sepolia-beacon-api.publicnode.com" \ + --mev-endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" \ + --infra-disk-io-test-file-dir="/opt/charon/test" + ``` +
+
+ +
+ + + Load tests are intended for a longer, more time-consuming run. Beacon nodes are put under heavy load. MEV relays are required to create real blocks. + Depending on which stage you are at with your cluster creation, some steps are simplified. + + + +

Pre-requisites

+ + - [ENR private key](../../learn/charon/charon-cli-reference#creating-an-enr-for-charon). + - Peers' ENRs, supplied to the `--peers-enrs` flag. + - Running beacon node(s) towards which tests will be executed, supplied to `--beacon-endpoints` flag. It is important that the node is expected to handle a heavy load and that it is **not** a publicly accessible one, which can block you. + - Running validator client towards which tests will be executed. + - Running MEV relay(s) towards which tests will be executed, supplied to `--mev-endpoints` flag. + - Running beacon node which will be used for fetching data required by the MEV relay for block creation, supplied to `--mev-beacon-node-endpoint`. There are no restrictions on the node and a public one can be used. + +

Example run

+ + ```shell + docker run --network="charon-distributed-validator-node_dvnode" -u $(id -u):$(id -g) --rm -v "$(pwd):/opt/charon/test" obolnetwork/charon:v1.2.0 alpha test all \ + --peers-enrs="enr:-HW4QMno_MB_ID6GFVxoIQAHHVHZZZjzFctxtX2tm9D95tvaPbHathi8YUP8jh8v2YUAVu2fYWEOB_BT14pt8QgiGg2AgmlkgnY0iXNlY3AyNTZrMaECdpnK83s0dbBwCaEfDIkQ-3nJkkC93STvv6Vmi0bYlzg,enr:-HW4QO2vefLueTBEUGly5hkcpL7NWdMKWx7Nuy9f7z6XZInCbFAc0IZj6bsnmj-Wi4ElS6jNa0Mge5Rkc2WGTVemas2AgmlkgnY0iXNlY3AyNTZrMaECR9SmYQ_1HRgJmNxvh_ER2Sxx78HgKKgKaOkCROYwaDY" \ + --peers-private-key-file="/opt/charon/test/.charon/charon-enr-private-key" \ + --beacon-endpoints="http://lighthouse:5052/" \ + --beacon-simulation-file-dir="/opt/charon/test" \ + --beacon-load-test \ + --validator-api-address="lodestar:5064" \ + --mev-endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" \ + --mev-beacon-node-endpoint="https://ethereum-beacon-api.publicnode.com" \ + --mev-load-test \ + --infra-disk-io-test-file-dir="/opt/charon/test" + ``` + +
+ +

Pre-requisites

+ + - Cluster definition file, supplied to the `--peers-definition-file` flag. + - Running beacon node(s) towards which tests will be executed, supplied to `--beacon-endpoints` flag. It is important that the node is expected to handle a heavy load and that it is **not** a publicly accessible one, which can block you. + - Running validator client towards which tests will be executed. + - Running MEV relay(s) towards which tests will be executed, supplied to `--mev-endpoints` flag. + - Running beacon node which will be used for fetching data required by the MEV relay for block creation, supplied to `--mev-beacon-node-endpoint`. There are no restrictions on the node and a public one can be used. + +

Example run

+ + ```shell + docker run --network="charon-distributed-validator-node_dvnode" -u $(id -u):$(id -g) --rm -v "$(pwd):/opt/charon/test" obolnetwork/charon:v1.2.0 alpha test all \ + --peers-definition-file="/opt/charon/test/.charon/cluster-definition.json" \ + --peers-private-key-file="/opt/charon/test/.charon/charon-enr-private-key" \ + --beacon-endpoints="http://lighthouse:5052/" \ + --beacon-simulation-file-dir="/opt/charon/test" \ + --beacon-load-test \ + --validator-api-address="lodestar:5064" \ + --mev-endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" \ + --mev-beacon-node-endpoint="https://ethereum-beacon-api.publicnode.com" \ + --mev-load-test \ + --infra-disk-io-test-file-dir="/opt/charon/test" + ``` + +
+ +

Pre-requisites

+ + - Cluster lock file, supplied to the `--peers-lock-file` flag. + - Running beacon node(s) towards which tests will be executed, supplied to `--beacon-endpoints` flag. It is important that the node is expected to handle a heavy load and that it is **not** a publicly accessible one, which can block you. + - Running validator client towards which tests will be executed. + - Running MEV relay(s) towards which tests will be executed, supplied to `--mev-endpoints` flag. + - Running beacon node which will be used for fetching data required by the MEV relay for block creation, supplied to `--mev-beacon-node-endpoint`. There are no restrictions on the node and a public one can be used. + +

Example run

+ + ```shell + docker run --network="charon-distributed-validator-node_dvnode" -u $(id -u):$(id -g) --rm -v "$(pwd):/opt/charon/test" obolnetwork/charon:v1.2.0 alpha test all \ + --peers-lock-file="/opt/charon/test/.charon/cluster-lock.json" \ + --peers-private-key-file="/opt/charon/test/.charon/charon-enr-private-key" \ + --beacon-endpoints="http://lighthouse:5052/" \ + --beacon-simulation-file-dir="/opt/charon/test" \ + --beacon-load-test \ + --validator-api-address="lodestar:5064" \ + --mev-endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" \ + --mev-beacon-node-endpoint="https://ethereum-beacon-api.publicnode.com" \ + --mev-load-test \ + --infra-disk-io-test-file-dir="/opt/charon/test" + ``` + +
+
+ +
+ +
+ +

Test connection to peers

+ + Run tests towards other Charon peers to evaluate the effectiveness of a potential cluster setup. The command sets up a libp2p node, similarly to what Charon normally does. This test command **has to be run simultaneously with the other peers**. After the node is up it waits for other peers to get their nodes up and running, retrying the connection every 3 seconds. The libp2p node connects to relays (configurable with `p2p-relays` flag) and to other libp2p nodes via TCP. Other peer nodes are discoverable by using their ENRs. Note that for a peer to be successfully discovered, it needs to be connected to the same relay. After completion of the test suite the libp2p node stays alive (duration configurable with `keep-alive` flag) for other peers to continue testing against it. The node can be forcefully stopped as well. + + To be able to establish a direct connection, you have to ensure: + + - Your machine is publicly accessible on the internet or at least a specific port is. + - You add the `p2p-tcp-address` flag (e.g.: `127.0.0.1:9001`) and the port specified in it is free and publicly accessible. + - You add the `p2p-external-ip` flag (e.g.: `8.8.8.8`) and specify your public IP. + + If all points are satisfied by you and the other peers, you should be able to establish a direct TCP connection between each other. Note that a relay is still required, as it is used for peer discovery. + + Depending on which stage you are at with your cluster creation, some steps are simplified. + +

Pre-requisites

+ + - [ENR private key](../../learn/charon/charon-cli-reference#creating-an-enr-for-charon). + - Peers' ENRs, supplied to the `--enrs` flag. + +

Example run

+ + ```shell + docker run -u $(id -u):$(id -g) --rm -v "$(pwd):/opt/charon/test" obolnetwork/charon:v1.2.0 alpha test peers \ + --enrs="enr:-HW4QMno_MB_ID6GFVxoIQAHHVHZZZjzFctxtX2tm9D95tvaPbHathi8YUP8jh8v2YUAVu2fYWEOB_BT14pt8QgiGg2AgmlkgnY0iXNlY3AyNTZrMaECdpnK83s0dbBwCaEfDIkQ-3nJkkC93STvv6Vmi0bYlzg,enr:-HW4QO2vefLueTBEUGly5hkcpL7NWdMKWx7Nuy9f7z6XZInCbFAc0IZj6bsnmj-Wi4ElS6jNa0Mge5Rkc2WGTVemas2AgmlkgnY0iXNlY3AyNTZrMaECR9SmYQ_1HRgJmNxvh_ER2Sxx78HgKKgKaOkCROYwaDY" \ + --private-key-file="/opt/charon/test/.charon/charon-enr-private-key" + ``` +
+ +

Pre-requisites

+ + - Cluster definition file, supplied to the `--definition-file` flag. + +

Example run

+ + ```shell + docker run -u $(id -u):$(id -g) --rm -v "$(pwd):/opt/charon/test" obolnetwork/charon:v1.2.0 alpha test peers \ + --definition-file="/opt/charon/test/.charon/cluster-definition.json" \ + --private-key-file="/opt/charon/test/.charon/charon-enr-private-key" + ``` +
+ +

Pre-requisites

+ + - Cluster lock file, supplied to the `--lock-file` flag. + +

Example run

+ + ```shell + docker run -u $(id -u):$(id -g) --rm -v "$(pwd):/opt/charon/test" obolnetwork/charon:v1.2.0 alpha test peers \ + --lock-file="/opt/charon/test/.charon/cluster-lock.json" \ + --private-key-file="/opt/charon/test/.charon/charon-enr-private-key" + ``` +
+
+ +

Test beacon node

+ +Run tests on beacon node(s), to evaluate their effectiveness for a Distributed Validator cluster. The beacon node is usually the client doing the most work in a validating stack, especially with a high number of validators being serviced by the validator client(s) and Charon(s) that depend on it. + + + + + Regular tests are intended for a relatively fast run, without putting any major load on any tested system. + +

Pre-requisites

+ + - Running beacon node(s) towards which tests will be executed, supplied to `--endpoints` flag. + +

Example run

+ + ```shell + docker run obolnetwork/charon:v1.2.0 alpha test beacon \ + --endpoints="https://ethereum-holesky-beacon-api.publicnode.com,https://ethereum-sepolia-beacon-api.publicnode.com" + ``` +
+ + + Load tests are intended for a longer, more time-consuming run. Beacon nodes are put under heavy load. + + These tests include simulated workloads for an increasing number of validators, and the process takes some time (approximately 33 minutes). It is normal to observe some warnings during the simulations. + + A file with detailed results about the simulations performed is saved in the current working directory (configurable by changing the volume mapped from `$(pwd)` to the desired directory). + +

Pre-requisites

+ + - Running beacon node(s) towards which tests will be executed, supplied to `--endpoints` flag. It is important that the node is expecting to handle huge load and that it is **not** a publicly accessible one, which can block you. + +

Example run

+ + ```shell + docker run --network="charon-distributed-validator-node_dvnode" -u $(id -u):$(id -g) --rm -v "$(pwd):/opt/charon/test" obolnetwork/charon:v1.2.0 alpha test beacon \ + --endpoints="http://lighthouse:5052/" \ + --simulation-file-dir="/opt/charon/test" \ + --load-test + ``` +
+ +
+ +

Test validator client

+ +Run tests towards your validator client, to evaluate its effectiveness for a Distributed Validator cluster. + +The default validator endpoint and port of `127.0.0.1:3600` is used. This can be changed by supplying a different endpoint to the `--validator-api-address` flag. + +

Pre-requisites

+ +- Running validator client towards which tests will be executed. + +

Example run

+ +```shell +docker run --network="charon-distributed-validator-node_dvnode" obolnetwork/charon:v1.2.0 alpha test validator \ + --validator-api-address="lodestar:5064" +``` + +

Test MEV relay

+ +Run tests towards MEV relays, to evaluate their effectiveness for a Distributed Validator cluster. If MEV-Boost clients are configured for the distributed validator nodes, it is of utmost importance that the relays they connect to are fast and reliable. If not, the chance of missing a block proposal increases significantly. Supplying `--beacon-node-endpoint` and `--load-test` flags allows the test to ask relays for real MEV headers, increasing the accuracy (and duration) of this test. + +At least 1 endpoint is required to be supplied to the `--endpoints` flag. + + + +

Pre-requisites

+ + - Running MEV relay(s) towards which tests will be executed. + +

Example run

+ + ```shell + docker run obolnetwork/charon:v1.2.0 alpha test mev \ + --endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" + ``` + +
+ + +

Pre-requisites

+ + - Running MEV relay(s) towards which tests will be executed. + - Running beacon node which will be used for fetching data required by the MEV relay for block creation, supplied to `--beacon-node-endpoint`. There are no restrictions on the node and a public one can be used. + +

Example run

+ + ```shell + docker run obolnetwork/charon:v1.2.0 alpha test mev \ + --endpoints="https://0xa15b52576bcbf1072f4a011c0f99f9fb6c66f3e1ff321f11f461d15e31b1cb359caa092c71bbded0bae5b5ea401aab7e@aestus.live,https://0xa7ab7a996c8584251c8f925da3170bdfd6ebc75d50f5ddc4050a6fdc77f2a3b5fce2cc750d0865e05d7228af97d69561@agnostic-relay.net,https://0x8b5d2e73e2a3a55c6c87b8b6eb92e0149a125c852751db1422fa951e42a09b82c142c3ea98d0d9930b056a3bc9896b8f@bloxroute.max-profit.blxrbdn.com,https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net,https://0xa1559ace749633b997cb3fdacffb890aeebdb0f5a3b6aaa7eeeaf1a38af0a8fe88b9e4b1f61f236d2e64d95733327a62@relay.ultrasound.money,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@regional.titanrelay.xyz,https://0x8c4ed5e24fe5c6ae21018437bde147693f68cda427cd1122cf20819c30eda7ed74f72dece09bb313f2a1855595ab677d@global.titanrelay.xyz" \ + --load-test \ + --beacon-node-endpoint="https://ethereum-beacon-api.publicnode.com" + ``` + +
+ +
+ +

Test machine and network performance

+ +Run tests of your machine and network, to evaluate their effectiveness for a Distributed Validator cluster. Distributed Validators need stable, low-latency internet, a reasonable amount of RAM, and a highly performant disk drive for storage. This test aims to analyse these requirements to give an overview of the system's suitability. + +

Pre-requisites

+ +None. + +

Example run

+ + ```shell + docker run -u $(id -u):$(id -g) --rm -v "$(pwd):/opt/charon/test" obolnetwork/charon:v1.2.0 alpha test infra \ + --disk-io-test-file-dir=/opt/charon/test + ``` + +
+
diff --git a/versioned_docs/version-v1.2.0/run/running/_category_.json b/versioned_docs/version-v1.2.0/run/running/_category_.json new file mode 100644 index 0000000000..bf519c3b35 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/running/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Running your DV", + "position": 3, + "collapsed": true +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/run/running/activate-dv.mdx b/versioned_docs/version-v1.2.0/run/running/activate-dv.mdx new file mode 100644 index 0000000000..f1627775fd --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/running/activate-dv.mdx @@ -0,0 +1,37 @@ +--- +sidebar_position: 1 +description: Activate the Distributed Validator using the deposit contract +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Activate a DV + +If you have successfully created a distributed validator and you are ready to activate it, congratulations! 🎉 + +Once you have connected all of your Charon clients together, synced all of your ethereum nodes such that the monitoring indicates that they are all healthy and ready to operate, **ONE operator** may proceed to deposit and activate the validator(s). + +The `deposit-data.json` to be used to deposit will be located in each operator's `.charon` folder. The copies across every node should be identical and any of them can be uploaded. + +:::danger +If you are being given a `deposit-data.json` file that you didn't generate yourself, please take extreme care to ensure this operator has not given you a malicious `deposit-data.json` file that is not the one you expect. Cross reference the files from multiple operators if there is any doubt. Activating the wrong validator or an invalid deposit could result in complete theft or loss of funds. +::: + +Use any of the following tools to deposit. Please use the third-party tools at your own risk and always double check the staking contract address. 
+ +* Obol Distributed Validator Launchpad +* ethereum.org Staking Launchpad +* From a SAFE Multisig:
+(Repeat these steps for every validator to deposit in your cluster) + * From the SAFE UI, click on New Transaction then Transaction Builder to create a new custom transaction + * Enter the beacon chain contract for Deposit on mainnet - you can find it here + * Fill the transaction information + * Set amount to 32 in ETH + * Use your deposit-data.json to fill the required data : pubkey,withdrawal credentials,signature,deposit_data_root. Make sure to prefix the input with 0x to format them in bytes + * Click on Add transaction + * Click on Create Batch + * Click on Send Batch, you can click on Simulate to check if the transaction will execute successfully + * Get the minimum threshold of signatures from the other addresses and execute the custom transaction + +The activation process can take a minimum of 16 hours, with the maximum time to activation being dictated by the length of the activation queue, which can be weeks. diff --git a/versioned_docs/version-v1.2.0/run/running/monitoring.md b/versioned_docs/version-v1.2.0/run/running/monitoring.md new file mode 100644 index 0000000000..7d5fdacd47 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/running/monitoring.md @@ -0,0 +1,90 @@ +--- +sidebar_position: 4 +description: Add monitoring credentials to help the Obol Team monitor the health of your cluster +--- +# Monitoring Your Node + +This comprehensive guide will assist you in effectively monitoring your Charon clusters and setting up alerts by running your own Prometheus and Grafana server. If you want to use Obol’s [public dashboard](https://grafana.monitoring.gcp.obol.tech/d/d895e47a-3c2d-46b7-9b15-8f31202681af/clusters-aggregate-view?orgId=6) instead of running your servers, refer to [this section](../../run/start/obol-monitoring.md) in Obol docs that teaches you how to push Prometheus metrics to Obol. + +To explain quickly, Prometheus generates the metrics and Grafana visualizes them. 
To learn more about Prometheus and Grafana, visit [here](https://grafana.com/docs/grafana/latest/getting-started/get-started-grafana-prometheus/). If you are using **[CDVN repository](https://github.com/ObolNetwork/charon-distributed-validator-node)** or **[CDVC repository](https://github.com/ObolNetwork/charon-distributed-validator-cluster)**, then Prometheus and Grafana are part of docker compose file and will be installed when you run `docker compose up`. + +The local Grafana server will have a few pre-built dashboards: + +1. Charon Overview + + This is the main dashboard that provides all the relevant details about the Charon node, for example - peer connectivity, duty completion, health of beacon node and downstream validator, etc. To open, navigate to `charon-distributed-validator-node` directory and open the following `uri` in the browser `http://localhost:3000/d/d6qujIJVk/`. + +2. Single Charon Node Dashboard (deprecated) + + This is an older dashboard Charon node monitoring which is now deprecated. If you are still using it, we would highly recommend to move to Charon Overview for most up to date panels. + +3. Charon Log Dashboard + + This dashboard can be used to query the logs emitted while running your Charon node. It utilises [Grafana Loki](https://grafana.com/oss/loki/). This dashboard is not active by default and should only be used in debug mode. Refer to [advanced docker config](../../adv/advanced/adv-docker-configs.md) section on how to set up a debug mode. + +| Alert Name | Description | Troubleshoot | +| --- | --- | --- | +| ClusterBeaconNodeDown | This alert is activated when the beacon node in a the cluster is offline. The beacon node is crucial for validating transactions and producing new blocks. Its unavailability could disrupt the overall functionality of the cluster. | Most likely data is corrupted. Wipe data from the point you know data was corrupted and restart beacon node so it can be synced again. 
| +| ClusterMissedAttestations | This alert indicates that there have been missed attestations in the cluster. Missed attestations may suggest that validators are not operating correctly, compromising the security and efficiency of the cluster. | This alert is triggered when 3 attestations are missed in 2 minutes. Check if the minimum threshold of peers are online. If correct, check for beacon node API errors and downstream validator errors using Loki. Lastly, debug from Docker using `docker compose debug`. | +| ClusterInUnknownStatus | This alert is designed to activate when a node within the cluster is detected to be in an unknown state. The condition is evaluated by checking whether the maximum of the `app_monitoring_readyz` metric is 0. | This is most likely a bug in Charon. Report to us via [Discord](https://discord.com/channels/849256203614945310/970759460693901362). | +| ClusterInsufficientPeers | This alert is set to activate when the number of peers for a node in the cluster is insufficient. The condition is evaluated by checking whether the maximum of the `app_monitoring_readyz` equals 4. | If you are running group cluster, check with other peers to troubleshoot the issue. If you are running solo cluster, look into other machines running the DVs to find the problem. | +| ClusterFailureRate | This alert is activated when the failure rate of the cluster exceeds a certain threshold, more specifically - more than 5% failures in duties in the last 6 hours. | Check the upstream and downstream dependencies, latency and hardware issues. | +| ClusterVCMissingValidators | This alert is activated if any validators in the cluster are missing. This happens when validator client cannot load validator keys in the past 10 minutes. | Find if validator keys are missing and load them. | +| ClusterHighPctFailedSyncMsgDuty | This alert is activated if a high percentage of sync message duties failed in the cluster. 
The alert is activated if the sum of the increase in failed duties tagged with "sync_message" in the last hour divided by the sum of the increase in total duties tagged with "sync_message" in the last hour is greater than 10%. | This may be due to limitations in beacon node performance on nodes within the cluster. In charon, this duty is the most demanding, however, an increased failure rate does not impact rewards. | +| ClusterNumConnectedRelays | This alert is activated if the number of connected relays in the cluster falls to 0. | Make sure correct relay is configured. If you still get the error report to us via [Discord](https://discord.com/channels/849256203614945310/970759460693901362). | +| PeerPingLatency | This alert is activated if the 90th percentile of the ping latency to the peers in a cluster exceeds 400ms within 2 minutes. | Make sure to set up stable and high speed internet connection. If you have geographically distributed nodes, make sure latency does not go over 250 ms. | +| ClusterBeaconNodeZeroPeers | This alert is activated when beacon node cannot find peers. | Go to docs of beacon node client to troubleshoot. Make sure there is no port overlap and p2p discovery is open. | + +## Setting Up a Contact Point + +When alerts are triggered, they are routed to contact points according notification policies. For this, contact points must be added. Grafana supports several kind of contact points like email, PagerDuty, Discord, Slack, Telegram etc. This document will teach how to add Discord channel as contact point. + +1. On left nav bar in Grafana console, under `Alerts` section, click on contact points. +2. Click on `+ Add contact point`. It will show following page. Choose Discord in the `Integration` drop down. + + ![AlertsContactPoint](/img/AlertsContactPoint.png) + +3. Give a descriptive name to the alert. Create a channel in Discord and copy its `webhook url`. Once done, click `Save contact point` to finish. +4. 
When the alerts are fired, it will send without filling in the variables for cluster detail. For example, `cluster_hash` variable is missing here `cluster_hash = {{.cluster_hash}}`. This is done to save disk space. To find the details, use `docker compose -f docker-compose.yml -f compose-debug.yml up`. More description [here](https://docs.obol.tech/docs/advanced/adv-docker-configs). + +## Best Practices for Monitoring Charon Nodes & Cluster + +- **Establish Baselines**: Familiarize yourself with the normal operation metrics like CPU, memory, and network usage. This will help you detect anomalies. +- **Define Key Metrics**: Set up alerts for essential metrics, encompassing both system-level and Charon-specific ones. +- **Configure Alerts**: Based on these metrics, set up actionable alerts. +- **Monitor Network**: Regularly assess the connectivity between nodes and the network. +- **Perform Regular Health Checks**: Consistently evaluate the status of your nodes and clusters. +- **Monitor System Logs**: Keep an eye on logs for error messages or unusual activities. +- **Assess Resource Usage**: Ensure your nodes are neither over- nor under-utilized. +- **Automate Monitoring**: Use automation to ensure no issues go undetected. +- **Conduct Drills**: Regularly simulate failure scenarios to fine-tune your setup. +- **Update Regularly**: Keep your nodes and clusters updated with the latest software versions. + +## Third-Party Services for Uptime Testing + +- [updown.io](https://updown.io/) +- [Grafana synthetic Monitoring](https://grafana.com/grafana/plugins/grafana-synthetic-monitoring-app/) + +## Key metrics to watch to verify node health based on jobs + +**CPU Usage**: High or spiking CPU usage can be a sign of a process demanding more resources than it should. + +**Memory Usage**: If a node is consistently running out of memory, it could be due to a memory leak or simply under-provisioning. 
+ +**Disk I/O**: Slow disk operations can cause applications to hang or delay responses. High disk I/O can indicate storage performance issues or a sign of high load on the system. + +**Network Usage**: High network traffic or packet loss can signal network configuration issues, or that a service is being overwhelmed by requests. + +**Disk Space**: Running out of disk space can lead to application errors and data loss. + +**Uptime**: The amount of time a system has been up without any restarts. Frequent restarts can indicate instability in the system. + +**Error Rates**: The number of errors encountered by your application. This could be 4xx/5xx HTTP errors, exceptions, or any other kind of error your application may log. + +**Latency**: The delay before a transfer of data begins following an instruction for its transfer. + +It is also important to check: + +- NTP clock skew; +- Process restarts and failures (eg. through `node_systemd`); +- Alert on high error and panic log counts. diff --git a/versioned_docs/version-v1.2.0/run/running/quickstart-exit.mdx b/versioned_docs/version-v1.2.0/run/running/quickstart-exit.mdx new file mode 100644 index 0000000000..6ded736083 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/running/quickstart-exit.mdx @@ -0,0 +1,302 @@ +--- +sidebar_position: 3 +description: Exit a validator +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; +import CodeBlock from '@theme/CodeBlock'; + +# Exit a DV + +Users looking to exit staking entirely and withdraw their full balance back must also sign and broadcast a "voluntary exit" message with validator keys which will start the process of exiting from staking. This is done with your validator client and submitted to your beacon node, and does not require gas. In the case of a DV, each Charon node needs to broadcast a partial exit to the other nodes of the cluster. 
Once a threshold of partial exits has been received by any node, the full voluntary exit will be sent to the beacon chain. + +This process will take 27 hours or longer depending on the current length of the exit queue. + +:::info +- A threshold of operators needs to run the exit command for the exit to succeed. +- If a Charon client restarts after the exit command is run but before the threshold is reached, it will lose the partial exits it has received from the other nodes. If all Charon clients restart and thus all partial exits are lost before the required threshold of exit messages are received, operators will have to rebroadcast their partial exit messages. +::: + +## Run the `voluntary-exit` command on your validator client + +Run the appropriate command on your validator client to broadcast an exit message from your validator client to its upstream Charon client. + +It needs to be the validator client that is connected to your Charon client taking part in the DV, as you are only signing a partial exit message, with a partial private key share, which your Charon client will combine with the other partial exit messages from the other operators. + +:::info +- All operators need to use the same `EXIT_EPOCH` for the exit to be successful. Assuming you want to exit as soon as possible, the default epochs included in the below commands should be sufficient for the respective network. +- Partial exits can be broadcasted by any validator client as long as the sum reaches the threshold for the cluster. +::: + + + + + +
+          
+            {String.raw`docker exec -it charon-distributed-validator-node-teku-1 /opt/teku/bin/teku voluntary-exit \
+            --beacon-node-api-endpoint="http://charon:3600/" \
+            --confirmation-enabled=false \
+            --validator-keys="/opt/charon/validator_keys:/opt/charon/validator_keys" \
+            --epoch=256`}
+          
+        
+
+ + The following executes an interactive command inside the Nimbus VC container. It copies all files and directories from the Keystore path /home/user/data/charon to the newly created /home/user/data/wd directory. +

+
+          
+            {String.raw`docker exec -it charon-distributed-validator-node-nimbus-1 /bin/bash -c ' \
+        
+            mkdir /home/user/data/wd
+            cp -r /home/user/data/charon/ /home/user/data/wd
+                
+            /home/user/nimbus_beacon_node deposits exit --all --epoch=256 --rest-url=http://charon:3600/ --data-dir=/home/user/data/wd/'`}
+          
+        
+
+ + The following executes an interactive command inside the Lodestar VC container to exit all validators. +
+          
+            {String.raw`docker exec -it charon-distributed-validator-node-lodestar-1 node /usr/app/packages/cli/bin/lodestar validator voluntary-exit \
+            --beaconNodes="http://charon:3600" \
+            --dataDir=/opt/data \
+            --exitEpoch=256 \
+            --network=holesky \
+            --yes`}
+          
+        
+
+ + The following executes an interactive command inside the Lighthouse VC container to exit all validators. The exit is submitted for the current epoch. +
+          
+            {String.raw`docker exec -it charon-distributed-validator-node-lighthouse-1 /bin/bash -c '\
+                for file in /opt/charon/keys/*; do \
+                  filename=$(basename $file);
+                  if [[ $filename == *".json"* ]]; then
+                  `}
+                    {String.raw`  keystore=$`}
+                    {String.raw`{filename%.*};
+                    `}
+                    {String.raw`lighthouse account validator exit \
+                      --beacon-node http://charon:3600 \
+                      --keystore /opt/charon/keys/$keystore.json \
+                      --network holesky \
+                      --password-file /opt/charon/keys/$keystore.txt \
+                      --no-confirmation \
+                      --no-wait;
+                  fi;
+                done;'`}
+          
+        
+
+ + Currently voluntary exits through Prysm are not supported. This is because Prysm supports voluntary exits only if both the validator client and the beacon node are running on Prysm. Note that this is incompatible with Charon, as the Charon client intercepts the communication between the validator client and the consensus layer. + + Voluntary exit can be submitted directly through Charon as well. The partially signed exit messages are stored (centrally) on Obol's infrastructure. Exits through Charon are submitted per validator. All active validators' public keys for a given cluster lock can be listed with: +
+          
+        {String.raw`docker exec -it charon-distributed-validator-node-charon-1 /bin/sh -c 'charon exit active-validator-list \
+    --beacon-node-endpoints="http://lighthouse:5052"'`}
+          
+        
+ Then a signed partial exit for a validator can be submitted by: +
+          
+        {String.raw`docker exec -it charon-distributed-validator-node-charon-1 /bin/sh -c 'charon exit sign \
+    --beacon-node-endpoints="http://lighthouse:5052" \
+    --validator-public-key="" \
+    --publish-timeout="5m"'`}
+          
+        
+ After a sufficient number of signed partial exits from node operators in the cluster is accumulated, a full (complete) exit is created. The threshold is the same as the one submitted during the cluster creation. After a full exit message is created, any operator from the cluster can broadcast it to the beacon chain with: +
+          
+        {String.raw`docker exec -it charon-distributed-validator-node-charon-1 /bin/sh -c 'charon exit broadcast \
+    --beacon-node-endpoints="http://lighthouse:5052" \
+    --validator-public-key="" \
+    --publish-timeout="5m"'`}
+          
+        
+
+ +

Exit a distributed validator using DappNode

+
    +
  1. + Navigate to the config tab of your Obol DappNode package. Click 'Packages', then click 'My Packages', and enter the Obol package. Go to the config tab. At the bottom right corner of the page, click on 'Show Advanced Editor'. + Config Tab +
  2. +
  3. + The advanced editor config page provides ENV configs for each validator. Scroll to the validator number you want to exit and type “true” in the column opposite SIGN_EXIT. + Config Tab +
  4. +
  5. + Scroll to the bottom of the page and click the 'update' button for the changes to take effect. + Config Tab +
  6. +
  7. + Check your logs to confirm the exit process has started. +
  8. +
+
+
+
+ + + +
+          
+            {String.raw`docker exec -it charon-distributed-validator-node-teku-1 /opt/teku/bin/teku voluntary-exit \
+            --beacon-node-api-endpoint="http://charon:3600/" \
+            --confirmation-enabled=false \
+            --validator-keys="/opt/charon/validator_keys:/opt/charon/validator_keys" \
+            --epoch=194048`}
+          
+        
+
+ + The following executes an interactive command inside the Nimbus VC container. It copies all files and directories from the Keystore path /home/user/data/charon to the newly created /home/user/data/wd directory. +

+
+          
+            {String.raw`docker exec -it charon-distributed-validator-node-nimbus-1 /bin/bash -c ' \
+            
+            mkdir /home/user/data/wd
+            cp -r /home/user/data/charon/ /home/user/data/wd
+            
+            /home/user/nimbus_beacon_node deposits exit --all --epoch=194048 --rest-url=http://charon:3600/ --data-dir=/home/user/data/wd/'`}
+          
+        
+
+ + The following executes an interactive command inside the Lodestar VC container to exit all validators. +
+          
+            {String.raw`docker exec -it charon-distributed-validator-node-lodestar-1 node /usr/app/packages/cli/bin/lodestar validator voluntary-exit \
+            --beaconNodes="http://charon:3600" \
+            --dataDir=/opt/data \
+            --exitEpoch=194048 \
+            --network=mainnet \
+            --yes`}
+          
+        
+
+ + The following executes an interactive command inside the Lighthouse VC container to exit all validators. The exit is submitted for the current epoch. +
+          
+            {String.raw`docker exec -it charon-distributed-validator-node-lighthouse-1 /bin/bash -c '\
+                for file in /opt/charon/keys/*; do \
+                  filename=$(basename $file);
+                  if [[ $filename == *".json"* ]]; then
+                  `}
+                    {String.raw`  keystore=$`}
+                    {String.raw`{filename%.*};
+                    `}
+                    {String.raw`lighthouse account validator exit \
+                      --beacon-node http://charon:3600 \
+                      --keystore /opt/charon/keys/$keystore.json \
+                      --network mainnet \
+                      --password-file /opt/charon/keys/$keystore.txt \
+                      --no-confirmation \
+                      --no-wait;
+                  fi;
+                done;'`}
+          
+        
+
+ + Currently voluntary exits through Prysm are not supported. This is because Prysm supports voluntary exits only if both the validator client and the beacon node are running on Prysm. Note that this is incompatible with Charon, as the Charon client intercepts the communication between the validator client and the consensus layer. + + Voluntary exit can be submitted directly through Charon as well. The partially signed exit messages are stored (centrally) on Obol's infrastructure. Exits through Charon are submitted per validator. All active validators' public keys for a given cluster lock can be listed with: +
+          
+        {String.raw`docker exec -it charon-distributed-validator-node-charon-1 /bin/sh -c 'charon exit active-validator-list \
+    --beacon-node-endpoints="http://lighthouse:5052"'`}
+          
+        
+ Then a signed partial exit for a validator can be submitted by: +
+          
+        {String.raw`docker exec -it charon-distributed-validator-node-charon-1 /bin/sh -c 'charon exit sign \
+    --beacon-node-endpoints="http://lighthouse:5052" \
+    --validator-public-key="" \
+    --publish-timeout="5m"'`}
+          
+        
+ After a sufficient number of signed partial exits from node operators in the cluster is accumulated, a full (complete) exit is created. The threshold is the same as the one submitted during the cluster creation. After a full exit message is created, any operator from the cluster can broadcast it to the beacon chain with: +
+          
+        {String.raw`docker exec -it charon-distributed-validator-node-charon-1 /bin/sh -c 'charon exit broadcast \
+    --beacon-node-endpoints="http://lighthouse:5052" \
+    --validator-public-key="" \
+    --publish-timeout="5m"'`}
+          
+        
+
+ +

Exit a distributed validator using DappNode

+ The steps below describe how to exit a validator using DappNode. +
    +
  1. + Navigate to the config tab of your Obol DappNode package. Click 'Packages', then click 'My Packages', and enter the Obol package. Go to the config tab. At the bottom right corner of the page, click on 'Show Advanced Editor'. + Config Tab +
  2. +
  3. + The advanced editor config page provides ENV configs for each validator. Scroll to the validator number you want to exit and type “true” in the column opposite SIGN_EXIT. + Config Tab +
  4. +
  5. + Scroll to the bottom of the page and click the 'update' button for the changes to take effect. + Config Tab +
  6. +
  7. + Check your logs to confirm the exit process has started. +
  8. +
+
+
+
+
+ +When submitting through a validator client (not through charon directly), once a threshold of exit signatures has been received by any single Charon client, it will craft a valid voluntary exit message and will submit it to the beacon chain for inclusion. You can monitor partial exits stored by each node in the [Grafana Dashboard](https://github.com/ObolNetwork/charon-distributed-validator-node). + +## Exit epoch and withdrawable epoch + +The process of a validator exiting from staking takes variable amounts of time, depending on how many others are exiting at the same time. + +Immediately upon broadcasting a signed voluntary exit message, the exit epoch and withdrawable epoch values are calculated based off the current epoch number. These values determine exactly when the validator will no longer be required to be online performing validation, and when the validator is eligible for a full withdrawal respectively. + +1. Exit epoch - epoch at which your validator is no longer active, no longer earning rewards, and is no longer subject to slashing rules. + :::warning + Up until this epoch (while "in the queue") your validator is expected to be online and is held to the same slashing rules as always. Do not turn your DV node off until this epoch is reached. + ::: +2. Withdrawable epoch - epoch at which your validator funds are eligible for a full withdrawal during the next validator sweep. + This occurs 256 epochs after the exit epoch, which takes ~27.3 hours. + +## How to verify a validator exit + +Consult the examples below and compare them to your validator's monitoring to verify that exits from each operator in the cluster are being received. This example is a cluster of 4 nodes with 2 validators and threshold of 3 nodes broadcasting exits are needed. + +1. Operator 1 broadcasts an exit on validator client 1. + ![Verify in Grafana Exit panel](/img/ExitPromQuery-01.png) + ![Verify in Grafana Exit panel](/img/DutyExit-01.png) +2. 
Operator 2 broadcasts an exit on validator client 2. + ![Verify in Grafana Exit panel](/img/ExitPromQuery-02.png) + ![Verify in Grafana Exit panel](/img/DutyExit-02.png) +3. Operator 3 broadcasts an exit on validator client 3. + ![Verify in Grafana Exit panel](/img/ExitPromQuery-03.png) + ![Verify in Grafana Exit panel](/img/DutyExit-03.png) + +At this point, the threshold of 3 has been reached and the validator exit process will start. The logs will show the following: +![Verify in Grafana Exit panel](/img/ExitLogs.png) + +:::tip +Once a validator has broadcasted an exit message, it must continue to validate for at least 27 hours or longer. Do not shut off your distributed validator nodes until your validator is fully exited. +::: diff --git a/versioned_docs/version-v1.2.0/run/running/update.mdx b/versioned_docs/version-v1.2.0/run/running/update.mdx new file mode 100644 index 0000000000..dfa59547f9 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/running/update.mdx @@ -0,0 +1,85 @@ +--- +sidebar_position: 2 +description: Update your DV cluster with the latest Charon release +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Update a DV + +It is highly recommended to upgrade your DV stack from time to time. This ensures that your node is secure, performant, up-to-date and you don't miss important hard forks. + +To do this, follow these steps: + +### Navigate to the node directory + + + +
+      
+        cd charon-distributed-validator-node
+      
+    
+
+ + +
+      
+        cd charon-distributed-validator-cluster
+      
+    
+
+
+ +### Pull latest changes to the repo + +```shell +git pull +``` + +### Create (or recreate) your DV stack + +```shell +docker compose up -d --build +``` + +:::danger +If you run more than one node in a DV Cluster, please take caution upgrading them simultaneously. Particularly if you are updating or changing the validator client used or recreating disks. It is recommended to update nodes on a sequential basis to minimse liveness and safety risks. +::: + +### Conflicts + +You may get a `git conflict` error similar to this: + +```markdown +error: Your local changes to the following files would be overwritten by merge: +prometheus/prometheus.yml + +Please commit your changes or stash them before you merge. +``` + +This is probably because you have made some changes to some of the files, for example to the `prometheus/prometheus.yml` file. + +To resolve this error, you can either: + +- Stash and reapply changes if you want to keep your custom changes: + + ```shell + git stash # Stash your local changes + git pull # Pull the latest changes + git stash apply # Reapply your changes from the stash + docker-compose up -d --build # Recreate your DV stack + ``` + + After reapplying your changes, manually resolve any conflicts that may arise between your changes and the pulled changes using a text editor or Git's conflict resolution tools. + +- Override changes and recreate configuration if you don't need to preserve your local changes and want to discard them entirely: + + ```shell + git reset --hard # Discard all local changes and override with the pulled changes + git pull # Pull the latest changes + docker-compose up -d --build # Recreate your DV stack + ``` + + After overriding the changes, you will need to recreate your DV stack using the updated files. + By following one of these approaches, you should be able to handle Git conflicts when pulling the latest changes to your repository, either preserving your changes or overriding them as per your requirements. 
diff --git a/versioned_docs/version-v1.2.0/run/start/_category_.json b/versioned_docs/version-v1.2.0/run/start/_category_.json new file mode 100644 index 0000000000..f020212956 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/start/_category_.json @@ -0,0 +1,5 @@ +{ + "label": "Quickstart", + "position": 1, + "collapsed": true +} diff --git a/versioned_docs/version-v1.2.0/run/start/obol-monitoring.md b/versioned_docs/version-v1.2.0/run/start/obol-monitoring.md new file mode 100644 index 0000000000..8f721d6ff2 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/start/obol-monitoring.md @@ -0,0 +1,57 @@ +--- +sidebar_position: 5 +description: Add monitoring credentials to help the Obol Team monitor the health of your cluster +--- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +# Push Metrics to Obol Monitoring + +:::info +This is **optional** and does not confer any special privileges within the Obol Network. +::: + + + + +This is for operators using the [example repo](https://github.com/ObolNetwork/charon-distributed-validator-node) from our [quickstart guide](../start/quickstart_overview.md), and have been provided with **Monitoring Credentials** used to push distributed validator metrics to Obol's central Prometheus cluster to monitor, analyze, and improve your Distributed Validator Cluster's performance. (For example, this is necessary to participate in the Obol [Techne](https://squadstaking.com/techne) credential program.) + +## Update the monitoring token in the `.env` file +- Inside your `.env` file, uncomment the `PROM_REMOTE_WRITE_TOKEN` line by removing the `#` symbol. +- Enter your monitoring token in the format shown below: + +```shell +PROM_REMOTE_WRITE_TOKEN=your_monitoring_token +``` + +## Save the `.env` file and restart Prometheus +Save the `.env` file, and run the `docker compose up -d` command, and prometheus will be restarted to apply the changes. 
+ +```shell +docker compose up -d +# Alternatively +docker compose restart prometheus +``` + + + + +The last step in your DappNode setup is to add your Monitoring Credentials. This allows you to push distributed validator metrics to Obol’s central Prometheus cluster for monitoring, analysis, and performance optimization of your Distributed Validator Cluster. It also facilitates easier troubleshooting with the Obol team when needed. + +
    +
  1. Get Prometheus credentials from Obol, which will look like:
    obol20tnt8UC...
  2. +
  3. Navigate to your Obol package in DappNode and go to the Config tab.
  4. + Go to config tab +
  5. + At the bottom of the page, add the credential token under **Prometheus Monitoring Credentials (optional)**, then click the Update button. +
  6. +
  7. + Return to the Info tab, scroll down to the Containers section, and click the down arrow to view all container statuses. If the Prometheus container is stopped, please restart it. + Restart containers +
  8. +
+ +
+
+ + diff --git a/versioned_docs/version-v1.2.0/run/start/quickstart-builder-api.mdx b/versioned_docs/version-v1.2.0/run/start/quickstart-builder-api.mdx new file mode 100644 index 0000000000..9b00a6dada --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/start/quickstart-builder-api.mdx @@ -0,0 +1,165 @@ +--- +sidebar_position: 4 +description: Run a distributed validator cluster with the builder API (MEV-Boost) +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Enable MEV + +This quickstart guide focuses on configuring the builder API for Charon and supported validator and consensus clients. + +## Getting started with Charon & the Builder API + +Running a distributed validator cluster with the builder API enabled will give the validators in the cluster access to the builder network. This builder network is a network of "Block Builders" +who work with MEV searchers to produce the most valuable blocks a validator can propose. + +[MEV-Boost](https://boost.flashbots.net/) is one such product from Flashbots that enables you to ask multiple +block relays (who communicate with the "Block Builders") for blocks to propose. The block that pays the largest reward to the validator will be signed and returned to the relay for broadcasting to the wider +network. The end result for the validator is generally an increased APR as they receive some share of the MEV. + +:::info +Before completing this guide, please check your cluster version, which can be found inside the `cluster-lock.json` file. If you are using cluster-lock version `1.7.0` or higher, Charon seamlessly accommodates all validator client implementations within a MEV-enabled distributed validator cluster. + +For clusters with a `cluster-lock.json` version `1.6.0` and below, Charon is compatible only with [Teku](https://github.com/ConsenSys/teku). Use the version history feature of this documentation to see the instructions for configuring a cluster in that manner (`v0.16.0`). 
+::: + +## Client configuration + +:::note +You need to add CLI flags to your consensus client, Charon client, and validator client, to enable the builder API. + +You need all operators in the cluster to have their nodes properly configured to use the builder API, or you risk missing a proposal. +::: + +### Charon + +Charon supports builder API with the `--builder-api` flag. To use builder API, one simply needs to add this flag to the `charon run` command: + +```shell +charon run --builder-api +``` + +### Consensus Clients + +The following flags need to be configured on your chosen consensus client. A Flashbots relay URL is provided for example purposes, you should use the [charon test mev command](../../run/prepare/test-command.mdx#test-mev-relay) and select the two or three relays with the lowest latency to your node that also conform to your block building preferences. A public list of MEV relays is available [here](https://github.com/eth-educators/ethstaker-guides/blob/main/MEV-relay-list.md#mev-relay-list-for-mainnet). + + + + Teku can communicate with a single relay directly: +
+      
+    {String.raw`teku --builder-endpoint="https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net"`}
+      
+    
+ Or you can configure it to communicate with a local MEV-boost sidecar to configure multiple relays: +
+      
+    {String.raw`teku --builder-endpoint=http://mev-boost:18550`}
+      
+    
+
+ + Lighthouse can communicate with a single relay directly: +
+      
+    {String.raw`lighthouse bn --builder="https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net"`}
+      
+    
+ Or you can configure it to communicate with a local MEV-boost sidecar to configure multiple relays: +
+      
+    {String.raw`lighthouse bn --builder="http://mev-boost:18550"`}
+      
+    
+
+ + Prysm can communicate with a single relay directly: +
+      
+    {String.raw`prysm beacon-chain --http-mev-relay="https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net"`}
+      
+    
+
+ + Nimbus can communicate with a single relay directly: +
+      
+        {String.raw`nimbus_beacon_node \
+        --payload-builder=true \
+        --payload-builder-url="https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net"`}
+      
+    
+ You should also consider adding --local-block-value-boost=3 as a flag, to favour locally built blocks if they are within 3% in value of the relay block, to improve the chances of a successful proposal. +
+ + Lodestar can communicate with a single relay directly: +
+      
+    {String.raw`node ./lodestar --builder --builder.urls="https://0xac6e77dfe25ecd6110b8e780608cce0dab71fdd5ebea22a16c0205200f2f8e2e3ad3b71d3499c54ad14d6c21b41a37ae@boost-relay.flashbots.net"`}
+      
+    
+
+
+ +### Validator Clients + +The following flags need to be configured on your chosen validator client + + + +
+      
+    {String.raw`teku validator-client --validators-builder-registration-default-enabled=true`}
+      
+    
+ +
+ +
+      
+    {String.raw`lighthouse vc --builder-proposals`}
+      
+    
+
+ +
+      
+    {String.raw`prysm validator --enable-builder`}
+      
+    
+
+ +
+      
+    {String.raw`nimbus_validator_client --payload-builder=true`}
+      
+    
+
+ +
+      
+    {String.raw`node ./lodestar validator --builder="true" --builder.selection="builderonly"`}
+      
+    
+
+
+ +## Verify your cluster is correctly configured + +It can be difficult to confirm everything is configured correctly with your cluster until a proposal opportunity arrives, but here are some things you can check. + +When your cluster is running, you should see if Charon is logging something like this each epoch: + +```log +13:10:47.094 INFO bcast Successfully submitted validator registration to beacon node {"delay": "24913h10m12.094667699s", "pubkey": "84b_713", "duty": "1/builder_registration"} +``` + +This indicates that your Charon node is successfully registering with the relay for a blinded block when the time comes. + +If you are using the [ultrasound relay](https://relay.ultrasound.money), you can enter your cluster's distributed validator public key(s) into their website, to confirm they also see the validator as correctly registered. + +You should check that your validator client's logs look healthy, and ensure that you haven't added a `fee-recipient` address that conflicts with what has been selected by your cluster in your `cluster-lock.json` file, as that may prevent your validator from producing a signature for the block when the opportunity arises. You should also confirm the same for all of the other peers in your cluster. + +Once a proposal has been made, you should look at the `Block Extra Data` field under `Execution Payload` for the block on [Beaconcha.in](https://beaconcha.in/block/18450364), and confirm there is text present, this generally suggests the block came from a builder, and was not a locally constructed block. 
diff --git a/versioned_docs/version-v1.2.0/run/start/quickstart_alone.mdx b/versioned_docs/version-v1.2.0/run/start/quickstart_alone.mdx new file mode 100644 index 0000000000..d2cce55cf3 --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/start/quickstart_alone.mdx @@ -0,0 +1,199 @@ +--- +sidebar_position: 2 +description: Create a DV alone +--- +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +# Create a DV Alone + +:::info +It is possible for a single operator to manage all of the nodes of a DV cluster. The nodes can be run on a single machine, which is only suitable for testing, or the nodes can be run on multiple machines, which is expected for a production setup. + +The private key shares can be created centrally and distributed securely to each node. Alternatively, the private key shares can be created in a lower-trust manner with a [Distributed Key Generation](../../learn/intro/key-concepts.md#distributed-validator-key-generation-ceremony) process, which avoids the validator private key being stored in full anywhere, at any point in its lifecycle. Follow the [group quickstart](./quickstart_group.mdx) instead for this latter case. +::: + +## Pre-requisites +
    +
  • A basic knowledge of Ethereum nodes and validators.
  • +
  • Ensure you have git installed.
  • +
  • Ensure you have docker installed.
  • +
  • Make sure docker is running before executing the commands below.
  • +
+ +## Step 1: Create the key shares locally + + + + Go to the DV Launchpad and select Create a distributed validator alone. Follow the steps to configure your DV cluster. The Launchpad will give you a docker command to create your cluster.
Before you run the command, clone the CDVC repo and cd into the directory. + + ```shell + # Clone the repo + git clone https://github.com/ObolNetwork/charon-distributed-validator-cluster.git + + # Change directory + cd charon-distributed-validator-cluster/ + + # Run the command provided in the DV Launchpad "Create a cluster alone" flow + docker run -u $(id -u):$(id -g) --rm -v "$(pwd)/:/opt/charon" obolnetwork/charon:v1.2.0 create cluster --definition-file=... + ``` + +
+ + + +1. Clone the CDVC repo and cd into the directory. + + ```shell + # Clone the repo + git clone https://github.com/ObolNetwork/charon-distributed-validator-cluster.git + + # Change directory + cd charon-distributed-validator-cluster/ + ``` + +2. Run the cluster creation command, setting required flag values. + + Run the below command to create the validator private key shares and cluster artifacts locally, replacing the example values for `nodes`, `network`, `num-validators`, `fee-recipient-addresses`, and `withdrawal-addresses`. + Check the [Charon CLI reference](../../learn/charon/charon-cli-reference.md#create-a-full-cluster-locally) for additional, optional flags to set. + + ```shell + docker run --rm -v "$(pwd):/opt/charon" obolnetwork/charon:v1.2.0 create cluster \ + --nodes=4 \ + --network=holesky \ + --num-validators=1 \ + --name="Quickstart Guide Cluster" \ + --cluster-dir="cluster" \ + --fee-recipient-addresses=0x000000000000000000000000000000000000dead \ + --withdrawal-addresses=0x000000000000000000000000000000000000dead + ``` + +:::tip +If you would like your cluster to appear on the [DV Launchpad](../../learn/intro/launchpad.md), add the `--publish` flag to the command. +::: + + +
+
+ +After the `create cluster` command is run, you should have multiple subfolders within the newly created `./cluster/` folder, one for each node created. + +**Backup the `./cluster/` folder, then move on to deploying the cluster.** + +:::info +Make sure your backup is secure and private, someone with access to these files could get the validators slashed. +::: + +## Step 2: Deploy and start the nodes + + + + +:::warning +This part of the guide only runs one Execution Client, one Consensus Client, and 6 Distributed Validator Charon Client + Validator Client pairs on a single docker instance, and **is not suitable for a mainnet deployment**. (If this machine fails, there will not be any fault tolerance - the cluster will also fail.) + +For a production deployment with fault tolerance, follow the part of the guide instructing you how to distribute the nodes across multiple machines. +::: + +Run this command to start your cluster containers if you deployed using the [CDVC repo](https://github.com/ObolNetwork/charon-distributed-validator-cluster). + +```shell +# Start the distributed validator cluster +docker compose up --build -d +``` + +Check the monitoring dashboard and see if things look all right. + +```shell +# Open Grafana +open http://localhost:3000/d/laEp8vupp +``` + + + + +:::warning +To distribute your cluster across multiple machines, each node in the cluster needs one of the folders called `node*/` to be copied to it. Each folder should be copied to a [CDVN repo](https://github.com/ObolNetwork/charon-distributed-validator-node) and renamed from `node*` to `.charon`. + +Right now, the `charon create cluster` command [used earlier to create the private keys](./quickstart_alone#step-1-create-the-key-shares-locally) outputs a folder structure like `cluster/node*/`. Make sure to grab the `./node*/` folders, *rename* them to `.charon` and then move them to one of the single node repos below. 
Once all nodes are online, synced, and connected, you will be ready to activate your validator. +::: + + This is necessary for the folder to be found by the default `charon run` command. Optionally, it is possible to override `charon run`'s default file locations by using `charon run --private-key-file="node0/charon-enr-private-key" --lock-file="node0/cluster-lock.json"` for each instance of Charon you start (substituting `node0` for each node number in your cluster as needed). + + :point_right: Use the single node [docker compose](https://github.com/ObolNetwork/charon-distributed-validator-node), the kubernetes [manifests](https://github.com/ObolNetwork/charon-k8s-distributed-validator-node), or the [helm chart](https://github.com/ObolNetwork/helm-charts) example repos to get your nodes up and connected after loading the `.charon` folder artifacts into them appropriately. +
+ +```log title="Output from create cluster" +cluster +├── node0 +│ ├── charon-enr-private-key +│ ├── cluster-lock.json +│ ├── deposit-data.json +│ └── validator_keys +│ ├── keystore-0.json +│ ├── keystore-0.txt +│ ├── ... +│ ├── keystore-N.json +│ └── keystore-N.txt +├── node1 +│ ├── charon-enr-private-key +│ ├── cluster-lock.json +│ ├── deposit-data.json +│ └── validator_keys +│ ├── keystore-0.json +│ ├── keystore-0.txt +│ ├── ... +│ ├── keystore-N.json +│ └── keystore-N.txt +├── node2 +│ ├── charon-enr-private-key +│ ├── cluster-lock.json +│ ├── deposit-data.json +│ └── validator_keys +│ ├── keystore-0.json +│ ├── keystore-0.txt +│ ├── ... +│ ├── keystore-N.json +│ └── keystore-N.txt +└── node3 + ├── charon-enr-private-key + ├── cluster-lock.json + ├── deposit-data.json + └── validator_keys + ├── keystore-0.json + ├── keystore-0.txt + ├── ... + ├── keystore-N.json + └── keystore-N.txt + +``` + +```log title="Folder structure to be placed on each DV node" +└── .charon + ├── charon-enr-private-key + ├── cluster-lock.json + ├── deposit-data.json + └── validator_keys + ├── keystore-0.json + ├── keystore-0.txt + ├── ... + ├── keystore-N.json + └── keystore-N.txt +``` + +:::info + Currently, the quickstart repo installs a node on the Holesky testnet. It is possible to choose a different network (another testnet, or mainnet) by overriding the `.env` file. + + `.env.sample` is a sample environment file that allows overriding default configuration defined in `docker-compose.yml`. Uncomment and set any variable to override its value. + + Setup the desired inputs for the DV, including the network you wish to operate on. Check the [Charon CLI reference](../../learn/charon/charon-cli-reference.md) for additional optional flags to set. Once you have set the values you wish to use. Make a copy of this file called `.env`. + + ```shell + # Copy ".env.sample", renaming it ".env" + cp .env.sample.holesky .env + ``` + +::: + +
+
diff --git a/versioned_docs/version-v1.2.0/run/start/quickstart_group.mdx b/versioned_docs/version-v1.2.0/run/start/quickstart_group.mdx new file mode 100644 index 0000000000..c6f7fa197a --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/start/quickstart_group.mdx @@ -0,0 +1,717 @@ +--- +sidebar_position: 3 +description: Create a DV with a group +--- +import Tabs from "@theme/Tabs"; +import TabItem from "@theme/TabItem"; + +# Create a DV With a Group + +This quickstart guide will walk you through creating a Distributed Validator Cluster with a number of other node operators. + +## Pre-requisites +- A basic knowledge of Ethereum nodes and validators. +- A machine that meets the [minimum requirements](../prepare/deployment-best-practices#hardware-specifications) for the network you intend to validate. +- If you are taking part using a [DappNode](https://dappnode.com/): + - A computer with an up to date version of DappNode's software and an internet connection. +- If you are taking part using [Sedge](https://www.nethermind.io/sedge), or [Charon's Distributed Validator Node](https://github.com/ObolNetwork/lido-charon-distributed-validator-node) (CDVN) starter repo: + - Ensure you have git installed. + - Ensure you have docker installed. + - Make sure docker is running before executing the commands below. + +## Step 1: Get your ENR + + + + +In order to prepare for a distributed key generation ceremony, you need to create an ENR for your Charon client. This ENR is a public/private key pair that allows the other Charon clients in the DKG to identify and connect to your node. If you are creating a cluster but not taking part as a node operator in it, you can skip this step. + +```shell +# Clone the repo +git clone https://github.com/ObolNetwork/charon-distributed-validator-node.git +# Change directory +cd charon-distributed-validator-node/ +# Use docker to create an ENR. Backup the file `.charon/charon-enr-private-key`. 
+docker run --rm -v "$(pwd):/opt/charon" obolnetwork/charon:v1.2.0 create enr +``` +You should expect to see a console output like this: +```logs +Created ENR private key: .charon/charon-enr-private-key +enr:-JG4QGQpV4qYe32QFUAbY1UyGNtNcrVMip83cvJRhw1brMslPeyELIz3q6dsZ7GblVaCjL_8FKQhF6Syg-O_kIWztimGAYHY5EvPgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKzMe_GFPpSqtnYl-mJr8uZAUtmkqccsAx7ojGmFy-FY4N0Y3CCDhqDdWRwgg4u +``` +:::warning +Please make sure to create a backup of the private key at `.charon/charon-enr-private-key`. Be careful not to commit it to git! **If you lose this file you won't be able to take part in the DKG ceremony nor start the DV cluster successfully.** +::: + +:::tip +If instead of being shown your `enr` you see an error saying `permission denied` then you may need to [update your docker permissions](../../adv/troubleshooting/errors.mdx#docker-permission-denied-error) to allow the command to run successfully. +::: + + + +#### Prepare an Execution and Consensus client +Before preparing the DappNode to take part in a Distributed Validator Cluster, you must ensure you have selected an execution client & consensus client on your DappNode under the 'Stakers' tab for the network you intend to validate. +

+

    +
  1. + Login to the DappNode Interface:{" "} + + Dappnode Login + + Dappnode Login +
  2. +
  3. + Click on the 'Stakers' tab on the left side, select an execution client (e.g. Geth) & consensus client (e.g. Lodestar) & click 'Apply changes'. This will start the syncing process which can take a number of hours. + Select Clients +
  4. +
  5. + Once the clients are finished syncing, it should reflect on your 'Dashboard' as shown below. + Dashboard showing chains are synced and their status are healthy +
  6. +
+

+#### Install the Obol DappNode package +With a fully synced Ethereum node now running on the DappNode, the below steps will walk through installing the Obol package via an IPFS hash and preparing for a Distributed Key Generation ceremony. Future versions of this guide will download the package from the official DappNode DappStore once a stable 1.0 release is made. +

+

    +
  1. + Before installing the package, make sure you are installing the correct one; this depends on which network your creator configures the cluster on, Holesky or Mainnet. You can find the link to both packages below: + +
  2. +
  3. + Copy the latest IPFS hash from the release details dropdown. + Retrieve IPFS Hash +
  4. +
  5. + Go back to DappNode Dashboard > Dappstore, select the 'Public' tab, and accept the terms & conditions before proceeding. + Select Public Tab +
  6. +
  7. + Paste the IPFS hash you copied from Github and click 'Search' (It may take a minute for the package to be found.) You will then be presented with the package installation page. Under the blue 'Install' button, click on 'Advanced Options' & toggle the button to 'Bypass only signed safe restriction'. + bypass only signed safe restriction +
  8. +
  9. + Click 'Install' & in the config mode page > select new cluster & submit. (if you already have the config URL, you can select URL option.) + Install Package +
  10. +
  11. + Accept the terms & conditions and the install process will begin. + Accept terms and conditions + Package Installing +
  12. +
  13. + You should now be able to see the Holesky Obol package under the 'Packages' tab. Click on the package to see important details. + Go to packages tab +
  14. +
  15. + Under the 'Info' tab, you will see pre-generated ENRs, along with information such as the status of all five distributed validator clusters, their docker volumes & other menu options. + Get your ENR +
  16. +
  17. + Select any of the ENRs listed that are not already in use. This ENR will be used in the next step. +
  18. +
+

+
+ + +#### Installing Sedge + +First you must install Sedge, please refer to the official Sedge installation guide to do so. + +#### Check the install was successful + +Run the below command to check if your have successfully installed sedge in your computer. +```shell +sedge +``` + +Expected output: +```log +A tool to allow deploying validators with ease. + Usage: + sedge [command] + Available Commands: + cli Generate a node setup interactively + clients List supported clients + deps Manage dependencies + down Shutdown sedge running containers + generate Generate new setups according to selected options + help Help about any command + import-key Import validator keys + keys Generate keystore folder + logs Get running container logs + networks List supported networks + run Run services + show Show useful information about sedge running containers + slashing-export Export slashing protection data + slashing-import Import slashing protection data + version Print sedge version + Flags: + -h, --help help for sedge + --log-level string Set Log Level, e.g panic, fatal, error, warn, warning, info, debug, trace (default "info") + Use "sedge [command] --help" for more information about a command. +``` + +Create an ENR using charon: + +```shell +# Use docker to create an ENR. Backup the file `.charon/charon-enr-private-key`. +docker run --rm -v "$(pwd):/opt/charon" obolnetwork/charon:v1.2.0 create enr +``` + +
+ +For Step 2 of the quickstart: +- Select the **Creator** tab if you are coordinating the creation of the cluster (this role holds no position of privilege in the cluster, it only sets the initial terms of the cluster that the other operators agree to). +- Select the **Operator** tab if you are accepting an invitation to operate a node in a cluster, proposed by the cluster creator. + +## Step 2: Create a cluster or accept an invitation to a cluster + + + +

Collect addresses, configure the cluster, share the invitation

+

+ Before starting the cluster creation process, you will need to collect an + Ethereum address for each operator in the cluster. They will need to be + able to sign messages through MetaMask with this address. + (Broader wallet support will be added in future.) With these addresses + in hand, go through the cluster creation flow. +

+ + +

+ You will use the Launchpad to create an invitation, and share it with + the operators. This video shows the flow within the [DV Launchpad](../../learn/intro/launchpad.md) +

+

+ +

+

+ The following are the steps for creating a cluster. +

    +
  1. + Go to the{" "} + DV Launchpad +
  2. +
  3. + Connect your wallet + Connect your wallet +
  4. +
  5. + Select Create a Cluster with a group then{" "} + Get Started. + Get started +
  6. +
  7. Follow the flow and accept the advisories.
  8. +
  9. Configure the Cluster
  10. +
      +
    • + Input the Cluster Name & Cluster Size{" "} + (i.e. number of operators in the cluster). The threshold will + update automatically, it shows the number of nodes that need to + be functioning for the validator(s) to stay active. +
    • +
    +
  11. + Input the Ethereum addresses for each operator that you collected + previously. If you will be taking part as an operator, click the + "Use My Address" button for Operator 1. +
  12. +
      +
    • + Select the desired amount of validators (32 ETH each) the + cluster will run. (Note that the mainnet launchpad is restricted + to one validator for now.) +
    • +
    • + If you are taking part in the cluster, enter the ENR you + generated in step one in + the "What is your charon client's ENR?" field. +
    • +
    • + Enter the Principal address which should receive + the principal 32 ETH and the accrued consensus layer rewards + when the validator is exited. This can optionally be set to the + contract address of a multisig / splitter contract. +
    • +
    • + Enter the Fee Recipient address to which the + execution layer rewards will go. This can be the same as the + principal address, or it can be a different address. This can + optionally be set to the contract address of a multisig / + splitter contract. +
    • +
    +
  13. + Click Create Cluster Configuration. Review that all + the details are correct, and press Confirm and Sign.{" "} + You will be prompted to sign two or three transactions with your + MetaMask wallet. These are: +
  14. +
      +
    • + The config_hash. This is a hashed representation of + the details of this cluster, to ensure everyone is agreeing to + an identical setup. +
    • +
    • + The operator_config_hash. This is your acceptance + of the terms and conditions of participating as a node operator. +
    • +
    • + Your ENR. Signing your ENR authorises the + corresponding private key to act on your behalf in the cluster. +
    • +
    +
  15. + Share your cluster invite link with the operators. Following the + link will show you a screen waiting for other operators to accept + the configuration you created. + Invite Operators +
  16. +
  17. + You can use the link to monitor how many of the operators have + already signed their approval of the cluster configuration and + submitted their ENR. +
  18. +
+

+
+ +

+ You will use the CLI to create the cluster definition file, which you + will distribute to the operators manually. +

    +
  1. + The leader or creator of the cluster will prepare the + cluster-definition.json file for the Distributed Key Generation + ceremony using the charon create dkg command. +
  2. +
  3. + Populate the charon create dkg command with the + appropriate flags including the name, the{" "} + num-validators, the{" "} + fee-recipient-addresses, the{" "} + withdrawal-addresses, and the{" "} + operator-enrs of all the operators participating in + the cluster. +
  4. +
  5. + Run the charon create dkg command that generates DKG + cluster-definition.json file. +
    +                docker run --rm -v "$(pwd):/opt/charon" obolnetwork/charon:v1.2.0 create dkg {'\n'}
    +                --name="Quickstart" {'\n'}
    +                --num-validators=1 {'\n'}
    +                --fee-recipient-addresses="0x0000000000000000000000000000000000000000" {'\n'}
    +                --withdrawal-addresses="0x0000000000000000000000000000000000000000" {'\n'}
    +                --operator-enrs="enr:-JG4QGQpV4qYe32QFUAbY1UyGNtNcrVMip83cvJRhw1brMslPeyELIz3q6dsZ7GblVaCjL_8FKQhF6Syg-O_kIWztimGAYHY5EvPgmlkgnY0gmlwhH8AAAGJc2VjcDI1NmsxoQKzMe_GFPpSqtnYl-mJr8uZAUtmkqccsAx7ojGmFy-FY4N0Y3CCDhqDdWRwgg4u"
    +              
    + This command should output a file at + .charon/cluster-definition.json This file needs to be + shared with the other operators in a cluster. +
      +
    • + The .charon folder is hidden by default. To view + it, run ls -al .charon in your terminal. Else, if + you are on macOS, press{" "} + Cmd + Shift + . to view all hidden files in the + Finder application. +
    • +
    +
  6. +
+

+
+
+
+ +

Join the cluster prepared by the creator

+ Use the Launchpad or CLI to join the cluster configuration generated by the creator: + + + Your cluster creator needs to configure the cluster, and send you + an invite URL link to join the cluster on the Launchpad. Once you've + received the Launchpad invite link, you can begin the cluster acceptance process. +

+ +

+
    +
  1. Click on the DV launchpad link provided by the leader or creator. Make sure you recognise the domain and the person sending you the link, to ensure you are not being phished.
  2. +
  3. Connect your wallet using the Ethereum address provided to the leader. Connect Wallet
  4. +
  5. Review the operators' addresses + submitted and click Get Started to continue. Get Started
  6. +
  7. Review and accept the DV Launchpad terms & conditions and advisories.
  8. +
  9. Review the cluster configuration set by the creator and add your + ENR that you generated in step 1.Review Config
  10. +
  11. + Sign the two transactions with your wallet, these are: +
      The config hash. + This is a hashed representation of all of the details for this cluster.
    +
      Your own ENR This signature authorises the key represented by + this ENR to act on your behalf in the cluster.
    +
  12. +
  13. Wait for all the other + operators in your cluster to also finish these steps.
  14. +
+
+ + You'll receive the cluster-definition.json file created by + the leader/creator. You should save it in the .charon/{" "} + folder that was created initially. (Alternatively, you can use the{" "} + --definition-file + flag to override the default expected location for this file.) + +
+
+
+Once every participating operator is ready, the next step is the distributed key generation amongst the operators. +- If you are not planning on operating a node, and were only configuring the cluster for the operators, your journey ends here. Well done! +- If you are one of the cluster operators, continue to the next step. +## Step 3: Run the Distributed Key Generation (DKG) ceremony + +:::tip +For the [DKG](../../learn/charon/dkg.md) to complete, all operators need to be running the command simultaneously. It helps if operators can agree on a certain time or schedule a video call for them to all run the command together. +::: + + + +

+ +

+1. Once all operators successfully signed, your screen will automatically advance to the next step and look like this. Click `Continue`. (If you closed the tab, you can always go back to the invite link shared by the leader and connect your wallet.) +![Config Signing Success](/img/Guide08.png) +2. Copy and run the `docker` command on the screen into your terminal. It will retrieve the remote cluster details and begin the DKG process. + ![Run the DKG](/img/Guide10.png) +3. Assuming the DKG is successful, a number of artefacts will be created in the `.charon` folder of the node. These include: + - A `deposit-data.json` file. This contains the information needed to activate the validator on the Ethereum network. + - A `cluster-lock.json` file. This contains the information needed by Charon to operate the distributed validator cluster with its peers. + - A `validator_keys/` folder. This folder contains the private key shares and passwords for the created distributed validators. + + +
+ + Once the creator gives you the cluster-definition.json file and you place it in a .charon subdirectory, run:
docker run --rm -v "$(pwd):/opt/charon" obolnetwork/charon:v1.2.0 dkg --publish
and the DKG process should begin. +
+ + Follow this step if you are signing through the DV Launchpad, importing the cluster definition URL into the DappNode package's config & then running the DKG inside the DappNode, followed by cluster run.
+ Start DKG +
    +
  1. + After all operators have signed with their wallet and have provided an ENR from the DappNode info tab, the Launchpad will instruct operators to begin the DKG ceremony. Click continue & navigate to the 'Dappnode/Avado' tab where the cluster definition URL is presented. + Dappnode Tab +
  2. +
  3. + To run the Distributed Key Generation ceremony using a DappNode, you must paste the cluster definition URL into the Obol Package interface. Go to the 'Config' tab, select 'URL' from the dropdown menu, paste the cluster definition URL you retrieved from the launchpad, into the validator `cluster-*` field which matches the cluster you took the ENR from. Example: If you picked ENR1 for signing, then you should paste the URL into Cluster-1. Finally, click the 'Update' button at the bottom of the page. + Copy URL + Paste URL + Select URL from dropdown +
  4. +
  5. + After DappNode records the cluster definition URL, go back to the 'Info' tab and restart the Charon container. + Restart Charon validator container +
  6. +
  7. + The node is now ready and will attempt to complete the DKG. You can monitor the DKG progress via the 'Logs' tab of the package. Once all clients in the cluster can establish a connection with one another and they each complete a handshake (confirm everyone has a matching cluster_definition_hash), the key generation ceremony begins. + Connect to peers in logs tab +
  8. +
  9. + Example of DKG ceremony completed log. + Connect to peers in logs tab +
  10. +
+

Create a DV Node Backup

+ It is important to back up all artefacts generated by the DKG ceremony, and your node ENR private key. The below steps will show you how to download your keys & node artefacts. +
    +
  1. + Navigate to the backup tab inside the Obol package. + Backup Tab +
  2. +
  3. + Click on the 'Backup now' button and it will open a new chrome window with a 'file save' option. Select the path where you want to save the Backup tar file. + Save Backup +
  4. +
  5. + Double click to extract the tar file. There will be folders for each charon node (max 5). Navigate to each node folder, and all artefacts related to each node will be present. + Extract File + artefacts +
  6. +
+ +
+ + +Sedge does not currently support taking part in a DKG. Follow the instructions for **Launchpad** to take part in the DKG with Charon, and in Step 4 you will import these keys into Sedge. + + +
+ +:::danger +Please make sure to create a backup of your `.charon/` folder. **If you lose your private keys you won't be able to start the DV cluster successfully and may risk your validator deposit becoming unrecoverable.** Ensure every operator has their `.charon` folder securely and privately backed up before activating any validators. +::: +:::info +The `cluster-lock` and `deposit-data` files are identical for each operator, if lost, they can be copied from one operator to another. +::: +Now that the DKG has been completed, all operators can start their nodes. +## Step 4: Start your Distributed Validator Node +With the DKG ceremony over, the last phase before activation is to prepare your node for validating over the long term. + + + + The CDVN repository is configured to sync an execution layer client (Nethermind) and a consensus layer client (Lighthouse) using Docker Compose, further client combinations can be prepared using Sedge. You can also leverage alternative ways to run a node such as Ansible, Helm, or Kubernetes manifests. + + + + + +:::info +Currently, the [CDVN repo](https://github.com/ObolNetwork/charon-distributed-validator-node) has defaults for the Holesky testnet and for mainnet. + +Start by copying the appropriate `.env.sample.` file to `.env`, and modifying values as needed. + +```shell +# To prepare the node for the Holesky test network +# Copy ".env.sample.holesky", renaming it ".env" +cp .env.sample.holesky .env + + +# To prepare the node for the main Ethereum network +# Copy ".env.sample.mainnet", renaming it ".env" +cp .env.sample.mainnet .env + + +In the same folder where you created your ENR in Step 1, and ran the DKG in Step 3, start your node in the DV cluster with docker compose. 
+ +```shell + +# To be run from the ./charon-distributed-validator-node folder +# Spin up a Distributed Validator Node with a Validator Client +docker compose up -d +``` + +:::warning + +Do not start this node until the DKG is complete, as the charon container will interfere with the charon instance attempting to take part in the DKG ceremony. + +::: + +If at any point you need to turn off your node, you can run: + +```shell +# Shut down the currently running Distributed Validator Node +docker compose down +``` +You should use the Grafana dashboard that accompanies the quickstart repo to see whether your cluster is healthy. +```shell +# Open Grafana dashboard +open http://localhost:3000/d/d6qujIJVk/ +``` +In particular you should check: +- That your Charon client can connect to the configured beacon client. +- That your Charon client can connect to all peers directly. +- That your validator client is connected to Charon, and has the private keys it needs loaded and accessible. +Most components in the dashboard have some help text there to assist you in understanding your cluster performance. +You might notice that there are logs indicating that a validator cannot be found and that APIs are returning 404. This is to be expected at this point, as the validator public keys listed in the lock file have not been deposited and acknowledged on the consensus layer yet (usually it takes ~16 hours after the deposit is made). + + + + +To prepare a Distributed Validator node using sedge, we will use the `sedge generate` command to prepare a docker-compose file of our preferred clients, `sedge import-key` to import the artifacts created during the DKG ceremony, and `sedge run` to begin running the node. + +#### Sedge generate +With Sedge installed, and the DKG complete, it’s time to deploy a Distributed Validator. Using the `sedge generate` command and its subcommands, Sedge will create a Docker Compose file needed to run the validator node. +

+

    +
  1. + The following command generates the artifacts required to deploy a distributed validator on the Holesky network, using Teku as the validator client, Prysm as the consensus client, and Geth as the execution client. For additional supported client combinations, refer to the documentation here. + ```shell + sedge generate full-node --validator=teku --consensus=prysm --execution=geth --network=holesky --distributed + ``` + You should be shown a long list of configuration outputs with the following endings: + ```shell + 2024-09-20 12:56:15 -- [INFO] Generation of files successfully, happy staking! You can use now 'sedge run' to start the setup. + ``` +
  2. +
  3. + Explore the config files. + + You should now see a `sedge-data` directory created in the folder where you ran the `sedge generate` command. + To view the directory contents, use the `ls` command. + ```shell + ls sedge-data + > docker-compose.yml jwtsecret + ``` + +
  4. +
+

+#### Sedge Import-key + +Use the following command to import keys from the directory where the `.charon` dir is located. +```shell +sedge import-key --from ./ holesky teku +``` +#### Sedge Run +After confirming the configurations and ensuring all files are in place, use the `sedge run` command to deploy the DV docker containers. Sedge will then begin pulling all the required Docker images. + +```shell +> sedge run +2024-09-20 13:11:49 -- [INFO] [Logger Init] Log level: info +2024-09-20 13:11:49 -- [WARN] A new Version of sedge is available. Please update to the latest Version. See https://github.com/NethermindEth/sedge/releases for more information. Latest detected tag: fatal: not a git repository (or any of the parent directories): .git +2024-09-20 13:11:50 -- [INFO] Setting up containers +2024-09-20 13:11:50 -- [INFO] Running command: docker compose -f /sedge/sedge-data/docker-compose.yml build +2024-09-20 13:11:50 -- [INFO] Running command: docker compose -f /sedge-data/docker-compose.yml pull +[+] Pulling 16/44 + ⠇ consensus [⣀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀] Pulling 20.8s + ⠙ b003b463d750 Downloading [===============> ] 32.9kB/103.7kB 14.2s + ⠙ fe5ca62666f0 Waiting 14.2s + ⠙ b02a7525f878 Waiting 14.2s + ⠙ fcb6f6d2c998 Waiting 14.2s + ⠙ e8c73c638ae9 Waiting 14.2s + ⠙ 1e3d9b7d1452 Waiting 14.2s + ⠙ 4aa0ea1413d3 Waiting 14.2s + ⠙ 7c881f9ab25e Waiting 14.2s + ⠙ 5627a970d25e Waiting 14.2s + ⠙ 5cf83054c259 Waiting 14.2s + ⠙ fec68abcb14d Waiting 14.2s + ⠙ 4d5ad547ce94 Waiting 14.2s + ⠙ e1ea80853e89 Waiting 14.2s + ⠙ 17b1d7e8d99a Waiting 14.2s + ⠙ 841a2fc14521 Waiting 14.2s + ⠙ 55b44d28dd62 Waiting 14.2s + ⠙ f3e3115c6547 Pulling fs layer 14.2s + ⠙ 3cec53649029 Waiting 14.2s + ⠙ 01739568079a Waiting 14.2s + ⠙ c6bd24b188db Waiting 14.2s + ⠙ fe8d2e9c9467 Waiting 14.2s + ⠙ c151008cbec0 Waiting 14.2s + ⠙ de1ef6c90686 Waiting 14.2s + ⠙ 03d09d97b125 Waiting 14.2s + ✔ execution Pulled 9.3s + ✔ a258b2a6b59a Pull complete 1.5s + ✔ a2d6cf6afda3 Pull complete 1.7s + ✔ a3dd8256fc41 Pull 
complete 6.9s +``` +Once all docker images are pulled, sedge will create & start the containers to run all the required clients. See below for example output of the progress. + +```shell +✔ 8db8b5d461a7 Pull complete 24.1s + ✔ 2288b86b1d5f Pull complete 24.3s + ✔ 4becb7b9a44b Pull complete 24.3s + ✔ 4f4fb700ef54 Pull complete 24.3s + ✔ 5c35e3728c84 Pull complete 35.1s +2024-09-20 13:12:45 -- [INFO] Running command: docker compose -f /sedge-data/docker-compose.yml create +[+] Creating 7/7 + ✔ Network sedge-network Created 0.1s + ✔ Container sedge-dv-client Created 0.4s + ✔ Container sedge-consensus-client Created 0.4s + ✔ Container sedge-execution-client Created 0.4s + ✔ Container sedge-mev-boost Created 0.4s + ✔ Container sedge-validator-blocker Created 0.4s + ✔ Container sedge-validator-client Created 0.1s +2024-09-20 13:12:45 -- [INFO] Running command: docker compose -f /sedge-data/docker-compose.yml up -d +[+] Running 4/5 + ✔ Container sedge-consensus-client Started 1.0s + ⠧ Container sedge-validator-blocker Waiting 130.8s + ✔ Container sedge-dv-client Started 1.0s + ✔ Container sedge-execution-client Started 1.3s + ✔ Container sedge-mev-boost Started +``` + +Given time, the execution and consensus clients should complete syncing, and if a Distributed Validator has already been activated, the node should begin to validate. + +If you encounter issues with using Sedge as part of a DV cluster, consider consulting the [Sedge docs](https://docs.sedge.nethermind.io/) directly, or opening an [issue](https://github.com/NethermindEth/sedge/issues) or [pull request](https://github.com/NethermindEth/sedge/pulls) if appropriate. +
+ + + Use an ansible playbook to start your node. See the repo here for further instructions. + + + Use a Helm to start your node. See the repo here for further instructions. + + + Use Kubernetes manifests to start your Charon client and validator client. These manifests expect an existing Beacon Node Endpoint to connect to. See the repo here for further instructions. + +
+
+ +:::warning +Using a remote beacon node will impact the performance of your Distributed Validator and should be used sparingly. +::: +If you already have a beacon node running somewhere and you want to use that instead of running an EL (`nethermind`) & CL (`lighthouse`) as part of the example repo, you can disable these images. To do so, follow these steps: +1. Copy the `docker-compose.override.yml.sample` file +```shell +cp -n docker-compose.override.yml.sample docker-compose.override.yml +``` +2. Uncomment the `profiles: [disable]` section for both `nethermind` and `lighthouse`. The override file should now look like this +```docker +services: + nethermind: + # Disable nethermind + profiles: [disable] + # Bind nethermind internal ports to host ports + #ports: + #- 8545:8545 # JSON-RPC + #- 8551:8551 # AUTH-RPC + #- 6060:6060 # Metrics + lighthouse: + # Disable lighthouse + profiles: [disable] + # Bind lighthouse internal ports to host ports + #ports: + #- 5052:5052 # HTTP + #- 5054:5054 # Metrics +... +``` +3. Then, uncomment and set the `CHARON_BEACON_NODE_ENDPOINTS` variable in the `.env` file to your beacon node's URL +```shell +... +# Connect to one or more external beacon nodes. Use a comma separated list excluding spaces. +CHARON_BEACON_NODE_ENDPOINTS= +... +``` +4. Restart your docker compose +```shell +docker compose down +docker compose up -d +``` + +
+ +:::tip +In a Distributed Validator Cluster, it is important to have a low latency connection to your peers. Charon clients will use the NAT protocol to attempt to establish a direct connection to one another automatically. If this doesn't happen, you should port forward Charon's p2p port to the public internet to facilitate direct connections. The default port to expose is `:3610`. Read more about Charon's networking [here](../../learn/charon/networking.mdx). +::: + +If you have gotten to this stage, every node is up, synced and connected, congratulations. You can now move forward to [activating your validator](../running/activate-dv.mdx) to begin staking. \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/run/start/quickstart_overview.md b/versioned_docs/version-v1.2.0/run/start/quickstart_overview.md new file mode 100644 index 0000000000..0981ea1eea --- /dev/null +++ b/versioned_docs/version-v1.2.0/run/start/quickstart_overview.md @@ -0,0 +1,19 @@ +--- +sidebar_position: 1 +description: Quickstart Overview +--- + +# Quickstart Overview + +The quickstart guides are aimed at developers and stakers looking to utilize Distributed Validators for solo or multi-operator staking. To contribute to this documentation, head over to our [Github repository](https://github.com/ObolNetwork/obol-docs) and file a pull request. + +There are two ways to set up a distributed validator and each comes with its own quickstart, within the "Getting Started" section: + +1. Run a DV cluster as a [**group**](./quickstart_group.mdx), where several operators run the nodes that make up the cluster. In this setup, the key shares are created using a distributed key generation process, avoiding the full private keys being stored in full in any one place. +This approach can also be used by single operators looking to manage all nodes of a cluster but wanting to create the key shares in a trust-minimised fashion. + +2. 
Run a DV cluster [**alone**](./quickstart_alone.mdx), where a single operator runs all the nodes of the DV. Depending on trust assumptions, there is not necessarily the need to create the key shares via a DKG process. Instead the key shares can be created in a centralised manner, and distributed securely to the nodes. + +## Need assistance? + +If you have any questions about this documentation or are experiencing technical problems with any Obol-related projects, head on over to our [Discord](https://discord.gg/n6ebKsX46w) where a member of our team or the community will be happy to assist you. diff --git a/versioned_docs/version-v1.2.0/sdk/_category_.json b/versioned_docs/version-v1.2.0/sdk/_category_.json new file mode 100644 index 0000000000..a3429cd31c --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/_category_.json @@ -0,0 +1,6 @@ +{ + "label": "SDK", + "position": 8, + "collapsed": true, + "className": "hidden" +} \ No newline at end of file diff --git a/versioned_docs/version-v1.2.0/sdk/classes/Client.md b/versioned_docs/version-v1.2.0/sdk/classes/Client.md new file mode 100644 index 0000000000..efa5e2b25f --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/classes/Client.md @@ -0,0 +1,247 @@ +Obol sdk Client can be used for creating, managing and activating distributed validators. + +## Extends + +- `Base` + +## Constructors + +### new Client() + +> **new Client**(`config`, `signer`?): [`Client`](Client.md) + +#### Parameters + +| Parameter | Type | Description | +| ------ | ------ | ------ | +| `config` | `object` | Client configurations | +| `config.baseUrl`? | `string` | obol-api url | +| `config.chainId`? | `number` | Blockchain network ID | +| `signer`? 
| `Signer` | ethersJS Signer | + +#### Returns + +[`Client`](Client.md) + +Obol-SDK Client instance + +An example of how to instantiate obol-sdk Client: +[obolClient](https://github.com/ObolNetwork/obol-sdk-examples/blob/main/TS-Example/index.ts#L29) + +#### Overrides + +`Base.constructor` + +#### Defined in + +index.ts:66 + +## Methods + +### acceptObolLatestTermsAndConditions() + +> **acceptObolLatestTermsAndConditions**(): `Promise`\<`string`\> + +Accepts Obol terms and conditions to be able to create or update data. + +#### Returns + +`Promise`\<`string`\> + +terms and conditions acceptance success message. + +#### Throws + +On unverified signature or wrong hash. + +An example of how to use acceptObolLatestTermsAndConditions: +[acceptObolLatestTermsAndConditions](https://github.com/ObolNetwork/obol-sdk-examples/blob/main/TS-Example/index.ts#L44) + +#### Defined in + +index.ts:79 + +*** + +### createObolRewardsSplit() + +> **createObolRewardsSplit**(`rewardsSplitPayload`): `Promise`\<[`ClusterValidator`](../type-aliases/ClusterValidator.md)\> + +Deploys OWR and Splitter Proxy. + +#### Parameters + +| Parameter | Type | Description | +| ------ | ------ | ------ | +| `rewardsSplitPayload` | [`RewardsSplitPayload`](../interfaces/RewardsSplitPayload.md) | Data needed to deploy owr and splitter. | + +#### Returns + +`Promise`\<[`ClusterValidator`](../type-aliases/ClusterValidator.md)\> + +owr address as withdrawal address and splitter as fee recipient + +An example of how to use createObolRewardsSplit: +[createObolRewardsSplit](https://github.com/ObolNetwork/obol-sdk-examples/blob/main/TS-Example/index.ts#L141) + +#### Remarks + +**⚠️ Important:** If you're storing the private key in an `.env` file, ensure it is securely managed +and not pushed to version control. 
+ +#### Defined in + +index.ts:133 + +*** + +### createObolTotalSplit() + +> **createObolTotalSplit**(`totalSplitPayload`): `Promise`\<[`ClusterValidator`](../type-aliases/ClusterValidator.md)\> + +Deploys Splitter Proxy. + +#### Parameters + +| Parameter | Type | Description | +| ------ | ------ | ------ | +| `totalSplitPayload` | [`TotalSplitPayload`](../type-aliases/TotalSplitPayload.md) | Data needed to deploy splitter if it doesn't exist. | + +#### Returns + +`Promise`\<[`ClusterValidator`](../type-aliases/ClusterValidator.md)\> + +splitter address as withdrawal address and splitter as fee recipient too + +An example of how to use createObolTotalSplit: +[createObolTotalSplit](https://github.com/ObolNetwork/obol-sdk-examples/blob/main/TS-Example/index.ts#L168) + +#### Remarks + +**⚠️ Important:** If you're storing the private key in an `.env` file, ensure it is securely managed +and not pushed to version control. + +#### Defined in + +index.ts:254 + +*** + +### createClusterDefinition() + +> **createClusterDefinition**(`newCluster`): `Promise`\<`string`\> + +Creates a cluster definition which contains cluster configuration. + +#### Parameters + +| Parameter | Type | Description | +| ------ | ------ | ------ | +| `newCluster` | [`ClusterPayload`](../type-aliases/ClusterPayload.md) | The new unique cluster. | + +#### Returns + +`Promise`\<`string`\> + +config_hash. + +#### Throws + +On duplicate entries, missing or wrong cluster keys. + +An example of how to use createClusterDefinition: +[createObolCluster](https://github.com/ObolNetwork/obol-sdk-examples/blob/main/TS-Example/index.ts#L59) + +#### Defined in + +index.ts:350 + +*** + +### acceptClusterDefinition() + +> **acceptClusterDefinition**(`operatorPayload`, `configHash`): `Promise`\<[`ClusterDefinition`](../interfaces/ClusterDefinition.md)\> + +Approves joining a cluster with specific configuration. 
+ +#### Parameters + +| Parameter | Type | Description | +| ------ | ------ | ------ | +| `operatorPayload` | [`OperatorPayload`](../type-aliases/OperatorPayload.md) | The operator data including signatures. | +| `configHash` | `string` | The config hash of the cluster which the operator confirms joining to. | + +#### Returns + +`Promise`\<[`ClusterDefinition`](../interfaces/ClusterDefinition.md)\> + +The cluster definition. + +#### Throws + +On unauthorized, duplicate entries, missing keys, not found cluster or invalid data. + +An example of how to use acceptClusterDefinition: +[acceptClusterDefinition](https://github.com/ObolNetwork/obol-sdk-examples/blob/main/TS-Example/index.ts#L106) + +#### Defined in + +index.ts:415 + +*** + +### getClusterDefinition() + +> **getClusterDefinition**(`configHash`): `Promise`\<[`ClusterDefinition`](../interfaces/ClusterDefinition.md)\> + +#### Parameters + +| Parameter | Type | Description | +| ------ | ------ | ------ | +| `configHash` | `string` | The configuration hash returned in createClusterDefinition | + +#### Returns + +`Promise`\<[`ClusterDefinition`](../interfaces/ClusterDefinition.md)\> + +The cluster definition for config hash + +#### Throws + +On not found config hash. + +An example of how to use getClusterDefinition: +[getObolClusterDefinition](https://github.com/ObolNetwork/obol-sdk-examples/blob/main/TS-Example/index.ts#L74) + +#### Defined in + +index.ts:469 + +*** + +### getClusterLock() + +> **getClusterLock**(`configHash`): `Promise`\<[`ClusterLock`](../type-aliases/ClusterLock.md)\> + +#### Parameters + +| Parameter | Type | Description | +| ------ | ------ | ------ | +| `configHash` | `string` | The configuration hash in cluster-definition | + +#### Returns + +`Promise`\<[`ClusterLock`](../type-aliases/ClusterLock.md)\> + +The matched cluster details (lock) from DB + +#### Throws + +On not found cluster definition or lock. 
+ +An example of how to use getClusterLock: +[getObolClusterLock](https://github.com/ObolNetwork/obol-sdk-examples/blob/main/TS-Example/index.ts#L89) + +#### Defined in + +index.ts:488 diff --git a/versioned_docs/version-v1.2.0/sdk/enumerations/FORK_MAPPING.md b/versioned_docs/version-v1.2.0/sdk/enumerations/FORK_MAPPING.md new file mode 100644 index 0000000000..e75f2a8d21 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/enumerations/FORK_MAPPING.md @@ -0,0 +1,10 @@ +Permitted ChainID's + +## Enumeration Members + +| Enumeration Member | Value | Description | Defined in | +| ------ | ------ | ------ | ------ | +| `0x00000000` | `1` | Mainnet. | types.ts:6 | +| `0x00001020` | `5` | Goerli/Prater. | types.ts:9 | +| `0x00000064` | `100` | Gnosis Chain. | types.ts:12 | +| `0x01017000` | `17000` | Holesky. | types.ts:15 | diff --git a/versioned_docs/version-v1.2.0/sdk/functions/validateClusterLock.md b/versioned_docs/version-v1.2.0/sdk/functions/validateClusterLock.md new file mode 100644 index 0000000000..03324c6a26 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/functions/validateClusterLock.md @@ -0,0 +1,26 @@ +> **validateClusterLock**(`lock`): `Promise`\<`boolean`\> + +Verifies Cluster Lock's validity. + +## Parameters + +| Parameter | Type | Description | +| ------ | ------ | ------ | +| `lock` | [`ClusterLock`](../type-aliases/ClusterLock.md) | cluster lock | + +## Returns + +`Promise`\<`boolean`\> + +boolean result to indicate if lock is valid + +## Throws + +on missing keys or values. 
+ +An example of how to use validateClusterLock: +[validateClusterLock](https://github.com/ObolNetwork/obol-sdk-examples/blob/main/TS-Example/index.ts#L127) + +## Defined in + +services.ts:13 diff --git a/versioned_docs/version-v1.2.0/sdk/index.md b/versioned_docs/version-v1.2.0/sdk/index.md new file mode 100644 index 0000000000..4bb6af85d6 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/index.md @@ -0,0 +1,90 @@ +--- +hide_title: true +--- + + + SDK Version: v2.1.0 + + +![Obol Logo](https://obol.org/obolnetwork.png) + +

Obol SDK

+ +This repo contains the Obol Software Development Kit, for creating Distributed Validators with the help of the [Obol API](https://docs.obol.org/api). + +## Getting Started + +Check out our [docs](https://docs.obol.org/advanced/quickstart-sdk), [examples](https://github.com/ObolNetwork/obol-sdk-examples/), and SDK [reference](https://obolnetwork.github.io/obol-sdk). Further guides and walkthroughs coming soon. + +## Terms and Conditions +To use the obol-sdk to create a cluster definition or accept an invite to join a cluster, you must first accept the [latest Obol terms and conditions](https://obol.org/terms.pdf) by calling acceptObolLatestTermsAndConditions. + +## ⚠️ Important Security Notice: +If you're integrating this SDK with a **backend** (e.g., in Node.js), and you store a private key for executing splitter transactions, handle it with extreme caution. Ensure that: + +- The private key is securely stored (e.g., in an `.env` file). +- Never commit or push your `.env` file containing the private key to version control. + +## Contributing + +Please review the following guidelines: + +- [How to Report Bugs](#how-to-report-bugs) +- [How to Propose Changes](#how-to-propose-changes) +- [Code Review Process](#code-review-process) + +### How to Report Bugs + +If you encounter a bug or unexpected behavior, please follow these steps to report it: + +1. Go to the "Issues" tab of this repository. +2. Click on the "Get started" button in the Bug report section. +3. Provide a clear title and description of the issue following the format provided. + +### How to Propose Changes + +If you'd like to propose improvements or new features, please follow these steps: + +1. Fork this repository. +2. Create a new branch for your changes. +3. Make your changes and commit them with clear messages. +4. Open a pull request with a detailed description of the changes. + +### Code Review Process + +All contributions are reviewed before they are merged into the main branch. 
Please address any feedback provided during the review process. + +Thank you for contributing to Obol-SDK! + +## Enumerations + +- [FORK\_MAPPING](enumerations/FORK_MAPPING.md) + +## Classes + +- [Client](classes/Client.md) + +## Interfaces + +- [ClusterDefinition](interfaces/ClusterDefinition.md) +- [RewardsSplitPayload](interfaces/RewardsSplitPayload.md) + +## Type Aliases + +- [ClusterOperator](type-aliases/ClusterOperator.md) +- [OperatorPayload](type-aliases/OperatorPayload.md) +- [ClusterCreator](type-aliases/ClusterCreator.md) +- [ClusterValidator](type-aliases/ClusterValidator.md) +- [ClusterPayload](type-aliases/ClusterPayload.md) +- [SplitRecipient](type-aliases/SplitRecipient.md) +- [TotalSplitPayload](type-aliases/TotalSplitPayload.md) +- [BuilderRegistrationMessage](type-aliases/BuilderRegistrationMessage.md) +- [BuilderRegistration](type-aliases/BuilderRegistration.md) +- [DepositData](type-aliases/DepositData.md) +- [DistributedValidator](type-aliases/DistributedValidator.md) +- [ClusterLock](type-aliases/ClusterLock.md) +- [ETH\_ADDRESS](type-aliases/ETH_ADDRESS.md) + +## Functions + +- [validateClusterLock](functions/validateClusterLock.md) diff --git a/versioned_docs/version-v1.2.0/sdk/interfaces/ClusterDefinition.md b/versioned_docs/version-v1.2.0/sdk/interfaces/ClusterDefinition.md new file mode 100644 index 0000000000..23ea4a2075 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/interfaces/ClusterDefinition.md @@ -0,0 +1,24 @@ +Cluster definition data needed for dkg + +## Extends + +- [`ClusterPayload`](../type-aliases/ClusterPayload.md) + +## Properties + +| Property | Type | Description | Inherited from | Defined in | +| ------ | ------ | ------ | ------ | ------ | +| `name` | `string` | The cluster name. | `ClusterPayload.name` | types.ts:73 | +| `operators` | [`ClusterOperator`](../type-aliases/ClusterOperator.md)[] | The cluster nodes operators addresses. 
| `ClusterPayload.operators` | types.ts:76 | +| `validators` | [`ClusterValidator`](../type-aliases/ClusterValidator.md)[] | The cluster validators information. | `ClusterPayload.validators` | types.ts:79 | +| `deposit_amounts?` | `null` \| `string`[] | The cluster partial deposits in gwei or 32000000000. | `ClusterPayload.deposit_amounts` | types.ts:82 | +| `creator` | [`ClusterCreator`](../type-aliases/ClusterCreator.md) | The creator of the cluster. | - | types.ts:90 | +| `version` | `string` | The cluster configuration version. | - | types.ts:93 | +| `dkg_algorithm` | `string` | The cluster dkg algorithm. | - | types.ts:96 | +| `fork_version` | `string` | The cluster fork version. | - | types.ts:99 | +| `uuid` | `string` | The cluster uuid. | - | types.ts:102 | +| `timestamp` | `string` | The cluster creation timestamp. | - | types.ts:105 | +| `config_hash` | `string` | The cluster configuration hash. | - | types.ts:108 | +| `threshold` | `number` | The distributed validator threshold. | - | types.ts:111 | +| `num_validators` | `number` | The number of distributed validators in the cluster. | - | types.ts:114 | +| `definition_hash?` | `string` | The hash of the cluster definition. | - | types.ts:117 | diff --git a/versioned_docs/version-v1.2.0/sdk/interfaces/RewardsSplitPayload.md b/versioned_docs/version-v1.2.0/sdk/interfaces/RewardsSplitPayload.md new file mode 100644 index 0000000000..0100f82208 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/interfaces/RewardsSplitPayload.md @@ -0,0 +1,17 @@ +OWR and Split Proxy Params + +## Extends + +- [`TotalSplitPayload`](../type-aliases/TotalSplitPayload.md) + +## Properties + +| Property | Type | Description | Inherited from | Defined in | +| ------ | ------ | ------ | ------ | ------ | +| `splitRecipients` | [`SplitRecipient`](../type-aliases/SplitRecipient.md)[] | The split recipients addresses and splits. 
| `TotalSplitPayload.splitRecipients` | types.ts:136 | +| `ObolRAFSplit?` | `number` | Split percentageNumber allocated for obol retroactive funding, minimum is 1%. | `TotalSplitPayload.ObolRAFSplit` | types.ts:139 | +| `distributorFee?` | `number` | The percentageNumber of accrued rewards that is paid to the caller of the distribution function to compensate them for the gas costs of doing so. Cannot be greater than 10%. For example, 5 represents 5%. | `TotalSplitPayload.distributorFee` | types.ts:142 | +| `controllerAddress?` | `string` | Address that can mutate the split, should be ZeroAddress for immutable split. | `TotalSplitPayload.controllerAddress` | types.ts:145 | +| `principalRecipient` | `string` | Address that will reclaim validator principal after exit. | - | types.ts:153 | +| `etherAmount` | `number` | Amount needed to deploy all validators expected for the OWR/Splitter configuration. | - | types.ts:156 | +| `recoveryAddress?` | `string` | Address that can control where the owr erc-20 tokens can be pushed, if set to zero it goes to splitter or principal address. | - | types.ts:159 | diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/BuilderRegistration.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/BuilderRegistration.md new file mode 100644 index 0000000000..19d4dad964 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/BuilderRegistration.md @@ -0,0 +1,14 @@ +> **BuilderRegistration**: `object` + +Pre-generated Signed Validator Builder Registration + +## Type declaration + +| Name | Type | Description | Defined in | +| ------ | ------ | ------ | ------ | +| `message` | [`BuilderRegistrationMessage`](BuilderRegistrationMessage.md) | Builder registration message. | types.ts:184 | +| `signature` | `string` | BLS signature of the builder registration message. 
| types.ts:187 | + +## Defined in + +types.ts:182 diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/BuilderRegistrationMessage.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/BuilderRegistrationMessage.md new file mode 100644 index 0000000000..0c21d2cb49 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/BuilderRegistrationMessage.md @@ -0,0 +1,16 @@ +> **BuilderRegistrationMessage**: `object` + +Unsigned DV Builder Registration Message + +## Type declaration + +| Name | Type | Description | Defined in | +| ------ | ------ | ------ | ------ | +| `fee_recipient` | `string` | The DV fee recipient. | types.ts:167 | +| `gas_limit` | `number` | Default is 30000000. | types.ts:170 | +| `timestamp` | `number` | Timestamp when generating cluster lock file. | types.ts:173 | +| `pubkey` | `string` | The public key of the DV. | types.ts:176 | + +## Defined in + +types.ts:165 diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterCreator.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterCreator.md new file mode 100644 index 0000000000..7ac2619dc8 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterCreator.md @@ -0,0 +1,14 @@ +> **ClusterCreator**: `object` + +Cluster creator data + +## Type declaration + +| Name | Type | Description | Defined in | +| ------ | ------ | ------ | ------ | +| `address` | `string` | The creator address. | types.ts:52 | +| `config_signature`? | `string` | The cluster configuration signature. 
| types.ts:54 | + +## Defined in + +types.ts:50 diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterLock.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterLock.md new file mode 100644 index 0000000000..ac50cb9ca0 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterLock.md @@ -0,0 +1,17 @@ +> **ClusterLock**: `object` + +Cluster Details after DKG is complete + +## Type declaration + +| Name | Type | Description | Defined in | +| ------ | ------ | ------ | ------ | +| `cluster_definition` | [`ClusterDefinition`](../interfaces/ClusterDefinition.md) | The cluster definition. | types.ts:235 | +| `distributed_validators` | [`DistributedValidator`](DistributedValidator.md)[] | The cluster distributed validators. | types.ts:238 | +| `signature_aggregate` | `string` | The cluster bls signature aggregate. | types.ts:241 | +| `lock_hash` | `string` | The hash of the cluster lock. | types.ts:244 | +| `node_signatures`? | `string`[] | Node Signature for the lock hash by the node secp256k1 key. | types.ts:247 | + +## Defined in + +types.ts:233 diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterOperator.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterOperator.md new file mode 100644 index 0000000000..5628815bf8 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterOperator.md @@ -0,0 +1,18 @@ +> **ClusterOperator**: `object` + +Node operator data + +## Type declaration + +| Name | Type | Description | Defined in | +| ------ | ------ | ------ | ------ | +| `address` | `string` | The operator address. | types.ts:23 | +| `enr`? | `string` | The operator ethereum node record. | types.ts:26 | +| `fork_version`? | `string` | The cluster fork_version. | types.ts:29 | +| `version`? | `string` | The cluster version. | types.ts:32 | +| `enr_signature`? | `string` | The operator enr signature. | types.ts:35 | +| `config_signature`? | `string` | The operator configuration signature. 
| types.ts:38 | + +## Defined in + +types.ts:21 diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterPayload.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterPayload.md new file mode 100644 index 0000000000..e3003704c2 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterPayload.md @@ -0,0 +1,16 @@ +> **ClusterPayload**: `object` + +Cluster configuration + +## Type declaration + +| Name | Type | Description | Defined in | +| ------ | ------ | ------ | ------ | +| `name` | `string` | The cluster name. | types.ts:73 | +| `operators` | [`ClusterOperator`](ClusterOperator.md)[] | The cluster nodes operators addresses. | types.ts:76 | +| `validators` | [`ClusterValidator`](ClusterValidator.md)[] | The cluster validators information. | types.ts:79 | +| `deposit_amounts`? | `string`[] \| `null` | The cluster partial deposits in gwei or 32000000000. | types.ts:82 | + +## Defined in + +types.ts:71 diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterValidator.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterValidator.md new file mode 100644 index 0000000000..852f4ad2de --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/ClusterValidator.md @@ -0,0 +1,14 @@ +> **ClusterValidator**: `object` + +Validator withdrawal configuration + +## Type declaration + +| Name | Type | Description | Defined in | +| ------ | ------ | ------ | ------ | +| `fee_recipient_address` | `string` | Address to receive MEV rewards (if enabled), block proposal and priority fees. | types.ts:62 | +| `withdrawal_address` | `string` | Address to receive skimming rewards and validator principal at exit. 
| types.ts:65 | + +## Defined in + +types.ts:60 diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/DepositData.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/DepositData.md new file mode 100644 index 0000000000..7659bf7e98 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/DepositData.md @@ -0,0 +1,17 @@ +> **DepositData**: `object` + +Required deposit data for validator activation + +## Type declaration + +| Name | Type | Description | Defined in | +| ------ | ------ | ------ | ------ | +| `pubkey` | `string` | The public key of the distributed validator. | types.ts:195 | +| `withdrawal_credentials` | `string` | The 0x01 withdrawal address of the DV. | types.ts:198 | +| `amount` | `string` | 32 ethers. | types.ts:201 | +| `deposit_data_root` | `string` | A checksum for DepositData fields. | types.ts:204 | +| `signature` | `string` | BLS signature of the deposit message. | types.ts:207 | + +## Defined in + +types.ts:193 diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/DistributedValidator.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/DistributedValidator.md new file mode 100644 index 0000000000..a440fe6d71 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/DistributedValidator.md @@ -0,0 +1,17 @@ +> **DistributedValidator**: `object` + +A distributed validator's public key, share public keys, and deposit/registration data + +## Type declaration + +| Name | Type | Description | Defined in | +| ------ | ------ | ------ | ------ | +| `distributed_public_key` | `string` | The public key of the distributed validator. | types.ts:215 | +| `public_shares` | `string`[] | The public key of the node distributed validator share. | types.ts:218 | +| `deposit_data`? | `Partial`\<[`DepositData`](DepositData.md)\> | The deposit data for activating the DV. | types.ts:221 | +| `partial_deposit_data`? | `Partial`\<[`DepositData`](DepositData.md)\>[] | The deposit data with partial amounts or full amount for activating the DV. 
| types.ts:224 | +| `builder_registration`? | [`BuilderRegistration`](BuilderRegistration.md) | pre-generated signed validator builder registration to be sent to builder network. | types.ts:227 | + +## Defined in + +types.ts:213 diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/ETH_ADDRESS.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/ETH_ADDRESS.md new file mode 100644 index 0000000000..1d2ebd7b18 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/ETH_ADDRESS.md @@ -0,0 +1,7 @@ +> **ETH\_ADDRESS**: `string` + +String expected to be Ethereum Address + +## Defined in + +types.ts:253 diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/OperatorPayload.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/OperatorPayload.md new file mode 100644 index 0000000000..fed8c309a3 --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/OperatorPayload.md @@ -0,0 +1,7 @@ +> **OperatorPayload**: `Partial`\<[`ClusterOperator`](ClusterOperator.md)\> & `Required`\<`Pick`\<[`ClusterOperator`](ClusterOperator.md), `"enr"` \| `"version"`\>\> + +A partial view of `ClusterOperator` with `enr` and `version` as required properties. + +## Defined in + +types.ts:44 diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/SplitRecipient.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/SplitRecipient.md new file mode 100644 index 0000000000..2fd6d43dbd --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/SplitRecipient.md @@ -0,0 +1,14 @@ +> **SplitRecipient**: `object` + +Split Recipient Keys + +## Type declaration + +| Name | Type | Description | Defined in | +| ------ | ------ | ------ | ------ | +| `account` | `string` | The split recipient address. | types.ts:125 | +| `percentAllocation` | `number` | The recipient split. 
| types.ts:128 | + +## Defined in + +types.ts:123 diff --git a/versioned_docs/version-v1.2.0/sdk/type-aliases/TotalSplitPayload.md b/versioned_docs/version-v1.2.0/sdk/type-aliases/TotalSplitPayload.md new file mode 100644 index 0000000000..60578fc4ed --- /dev/null +++ b/versioned_docs/version-v1.2.0/sdk/type-aliases/TotalSplitPayload.md @@ -0,0 +1,16 @@ +> **TotalSplitPayload**: `object` + +Split Proxy Params + +## Type declaration + +| Name | Type | Description | Defined in | +| ------ | ------ | ------ | ------ | +| `splitRecipients` | [`SplitRecipient`](SplitRecipient.md)[] | The split recipients addresses and splits. | types.ts:136 | +| `ObolRAFSplit`? | `number` | Split percentageNumber allocated for obol retroactive funding, minimum is 1%. | types.ts:139 | +| `distributorFee`? | `number` | The percentageNumber of accrued rewards that is paid to the caller of the distribution function to compensate them for the gas costs of doing so. Cannot be greater than 10%. For example, 5 represents 5%. | types.ts:142 | +| `controllerAddress`? | `string` | Address that can mutate the split, should be ZeroAddress for immutable split. | types.ts:145 | + +## Defined in + +types.ts:134 diff --git a/versioned_sidebars/version-v1.2.0-sidebars.json b/versioned_sidebars/version-v1.2.0-sidebars.json new file mode 100644 index 0000000000..bb09f9834c --- /dev/null +++ b/versioned_sidebars/version-v1.2.0-sidebars.json @@ -0,0 +1,127 @@ +{ + "tutorialSidebar": [ + { + "type": "autogenerated", + "dirName": "." 
+ } + ], + "apiSidebar": [ + { + "type": "doc", + "id": "sdk/index", + "label": "Intro" + }, + { + "type": "category", + "label": "Enumerations", + "collapsible": true, + "collapsed": true, + "items": [ + { + "type": "doc", + "id": "sdk/enumerations/FORK_MAPPING" + } + ] + }, + { + "type": "category", + "label": "Classes", + "collapsible": true, + "collapsed": false, + "items": [ + { + "type": "doc", + "id": "sdk/classes/Client" + } + ] + }, + { + "type": "category", + "label": "Interfaces", + "collapsible": true, + "collapsed": true, + "items": [ + { + "type": "doc", + "id": "sdk/interfaces/ClusterDefinition" + }, + { + "type": "doc", + "id": "sdk/interfaces/RewardsSplitPayload" + } + ] + }, + { + "type": "category", + "label": "Type-Aliases", + "collapsible": true, + "collapsed": true, + "items": [ + { + "type": "doc", + "id": "sdk/type-aliases/BuilderRegistration" + }, + { + "type": "doc", + "id": "sdk/type-aliases/BuilderRegistrationMessage" + }, + { + "type": "doc", + "id": "sdk/type-aliases/ClusterCreator" + }, + { + "type": "doc", + "id": "sdk/type-aliases/ClusterLock" + }, + { + "type": "doc", + "id": "sdk/type-aliases/ClusterOperator" + }, + { + "type": "doc", + "id": "sdk/type-aliases/ClusterPayload" + }, + { + "type": "doc", + "id": "sdk/type-aliases/ClusterValidator" + }, + { + "type": "doc", + "id": "sdk/type-aliases/DepositData" + }, + { + "type": "doc", + "id": "sdk/type-aliases/DistributedValidator" + }, + { + "type": "doc", + "id": "sdk/type-aliases/ETH_ADDRESS" + }, + { + "type": "doc", + "id": "sdk/type-aliases/OperatorPayload" + }, + { + "type": "doc", + "id": "sdk/type-aliases/SplitRecipient" + }, + { + "type": "doc", + "id": "sdk/type-aliases/TotalSplitPayload" + } + ] + }, + { + "type": "category", + "label": "Functions", + "collapsible": true, + "collapsed": true, + "items": [ + { + "type": "doc", + "id": "sdk/functions/validateClusterLock" + } + ] + } + ] +} diff --git a/versions.json b/versions.json index 83723934b1..abab509409 100644 --- 
a/versions.json +++ b/versions.json @@ -1,4 +1,5 @@ [ + "v1.2.0", "v1.1.2", "v1.1.1", "v1.1.0",