diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index d6ec61114c7b..2dcb5dadb174 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -45,7 +45,7 @@ jobs: RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" ERA_BELLMAN_CUDA_RELEASE: ${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: ${{ inputs.CUDA_ARCH }} - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: [matterlabs-ci-runner-high-performance] strategy: matrix: component: @@ -56,6 +56,7 @@ jobs: - prover-fri-gateway - prover-job-monitor - proof-fri-gpu-compressor + - prover-autoscaler outputs: protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} steps: @@ -91,7 +92,6 @@ jobs: run: | ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^24.key - - name: login to Docker registries if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) run: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 47ae3c517517..2f29fe98f0e6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -42,6 +42,9 @@ jobs: - '!prover/extract-setup-data-keys.sh' - 'docker/prover*/**' - '.github/workflows/build-prover-template.yml' + - '.github/workflows/new-build-prover-template.yml' + - '.github/workflows/build-witness-generator-template.yml' + - '.github/workflows/new-build-witness-generator-template.yml' - '.github/workflows/ci-prover-reusable.yml' - 'docker-compose-runner-nightly.yml' - '!**/*.md' @@ -53,7 +56,9 @@ jobs: - 'docker/external-node/**' - 'docker/server/**' - '.github/workflows/build-core-template.yml' + - '.github/workflows/new-build-core-template.yml' - '.github/workflows/build-contract-verifier-template.yml' + - '.github/workflows/new-build-contract-verifier-template.yml' - '.github/workflows/ci-core-reusable.yml' - '.github/workflows/ci-core-lint-reusable.yml' - 'Cargo.toml' diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml index b5286782fad7..9b23cda6f02a 100644 --- a/.github/workflows/new-build-contract-verifier-template.yml +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -74,36 +74,30 @@ jobs: tar -C ./contracts -zxf system-contracts.tar.gz - name: Install Apt dependencies + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config - name: Install Node + if: env.BUILD_CONTRACTS == 'true' uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: 20 cache: 'npm' - name: Install Yarn + if: env.BUILD_CONTRACTS == 'true' run: npm install -g yarn - name: Setup rust + if: env.BUILD_CONTRACTS == 'true' uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 with: toolchain: nightly-2024-08-01 - - name: Install cargo-nextest from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: cargo-nextest - - - name: Install sqlx-cli from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: sqlx-cli - tag: 0.8.1 - - name: Install foundry-zksync + if: env.BUILD_CONTRACTS == 'true' run: | mkdir ./foundry-zksync curl -LO 
https://github.com/matter-labs/foundry-zksync/releases/download/nightly-15bec2f861b3b4c71e58f85e2b2c9dd722585aa8/foundry_nightly_linux_amd64.tar.gz @@ -112,6 +106,7 @@ jobs: echo "$PWD/foundry-zksync" >> $GITHUB_PATH - name: Pre-download compilers + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | # Download needed versions of vyper compiler @@ -129,18 +124,14 @@ jobs: chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" done - - name: init - shell: bash - run: | - mkdir -p ./volumes/postgres - docker compose up -d postgres - - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' run: | ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup zkstackup --local || true - name: build contracts + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | cp etc/tokens/{test,localhost}.json diff --git a/.github/workflows/new-build-core-template.yml b/.github/workflows/new-build-core-template.yml index e8a41a7e0646..c4aeb9180fda 100644 --- a/.github/workflows/new-build-core-template.yml +++ b/.github/workflows/new-build-core-template.yml @@ -79,36 +79,30 @@ jobs: tar -C ./contracts -zxf system-contracts.tar.gz - name: Install Apt dependencies + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config - name: Install Node + if: env.BUILD_CONTRACTS == 'true' uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: 20 cache: 'npm' - name: Install Yarn + if: env.BUILD_CONTRACTS == 'true' run: npm install -g yarn - name: Setup rust + if: env.BUILD_CONTRACTS == 'true' uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 with: toolchain: nightly-2024-08-01 - - name: Install cargo-nextest from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: cargo-nextest - - - name: Install sqlx-cli from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: sqlx-cli - tag: 0.8.1 - - name: Install foundry-zksync + if: env.BUILD_CONTRACTS == 'true' run: | mkdir ./foundry-zksync curl -LO https://github.com/matter-labs/foundry-zksync/releases/download/nightly-15bec2f861b3b4c71e58f85e2b2c9dd722585aa8/foundry_nightly_linux_amd64.tar.gz @@ -117,6 +111,7 @@ jobs: echo "$PWD/foundry-zksync" >> $GITHUB_PATH - name: Pre-download compilers + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | # Download needed versions of vyper compiler @@ -134,18 +129,14 @@ jobs: chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" done - - name: init - shell: bash - run: | - mkdir -p ./volumes/postgres - docker compose up -d postgres - - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' run: | ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup zkstackup --local || true - name: build contracts + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | cp etc/tokens/{test,localhost}.json diff --git a/Cargo.lock b/Cargo.lock index 26929be71957..d3d75146aeee 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10710,7 +10710,6 @@ dependencies = [ "zksync_state", "zksync_state_keeper", "zksync_storage", - "zksync_tee_verifier_input_producer", "zksync_types", "zksync_utils", "zksync_vlog", @@ -10878,6 +10877,8 @@ dependencies = [ "zksync_object_store", "zksync_prover_interface", "zksync_types", + "zksync_utils", + "zksync_vm_executor", ] [[package]] @@ -11187,6 +11188,8 @@ name = "zksync_tee_verifier" version 
= "0.1.0" dependencies = [ "anyhow", + "bincode", + "once_cell", "serde", "tracing", "zksync_config", @@ -11194,31 +11197,11 @@ dependencies = [ "zksync_crypto_primitives", "zksync_merkle_tree", "zksync_multivm", - "zksync_object_store", "zksync_prover_interface", "zksync_types", "zksync_utils", ] -[[package]] -name = "zksync_tee_verifier_input_producer" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "tokio", - "tracing", - "vise", - "zksync_dal", - "zksync_object_store", - "zksync_prover_interface", - "zksync_queued_job_processor", - "zksync_tee_verifier", - "zksync_types", - "zksync_utils", - "zksync_vm_executor", -] - [[package]] name = "zksync_test_account" version = "0.1.0" diff --git a/Cargo.toml b/Cargo.toml index e86d67a435ff..4090f28cd0e3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,7 +35,6 @@ members = [ "core/node/consensus", "core/node/contract_verification_server", "core/node/api_server", - "core/node/tee_verifier_input_producer", "core/node/base_token_adjuster", "core/node/external_proof_integration_api", "core/node/logs_bloom_backfill", @@ -309,6 +308,5 @@ zksync_node_storage_init = { version = "0.1.0", path = "core/node/node_storage_i zksync_node_consensus = { version = "0.1.0", path = "core/node/consensus" } zksync_contract_verification_server = { version = "0.1.0", path = "core/node/contract_verification_server" } zksync_node_api_server = { version = "0.1.0", path = "core/node/api_server" } -zksync_tee_verifier_input_producer = { version = "0.1.0", path = "core/node/tee_verifier_input_producer" } zksync_base_token_adjuster = { version = "0.1.0", path = "core/node/base_token_adjuster" } zksync_logs_bloom_backfill = { version = "0.1.0", path = "core/node/logs_bloom_backfill" } diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index da0a93f624df..9e1a1b5948c7 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -45,7 +45,7 @@ struct Cli { /// Comma-separated list of components to launch. #[arg( long, - default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,vm_runner_protective_reads" + default_value = "api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,vm_runner_protective_reads" )] components: ComponentsToRun, /// Path to the yaml config. If set, it will be used instead of env vars. 
diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index b04227965f8c..c87bf3ce2dda 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -55,7 +55,6 @@ use zksync_node_framework::{ main_batch_executor::MainBatchExecutorLayer, mempool_io::MempoolIOLayer, output_handler::OutputHandlerLayer, RocksdbStorageOptions, StateKeeperLayer, }, - tee_verifier_input_producer::TeeVerifierInputProducerLayer, vm_runner::{ bwip::BasicWitnessInputProducerLayer, playground::VmPlaygroundLayer, protective_reads::ProtectiveReadsWriterLayer, @@ -288,6 +287,7 @@ impl MainNodeBuilder { self.node.add_layer(ProofDataHandlerLayer::new( try_load_config!(self.configs.proof_data_handler_config), self.genesis_config.l1_batch_commit_data_generator_mode, + self.genesis_config.l2_chain_id, )); Ok(self) } @@ -493,14 +493,6 @@ impl MainNodeBuilder { Ok(self) } - fn add_tee_verifier_input_producer_layer(mut self) -> anyhow::Result<Self> { - self.node.add_layer(TeeVerifierInputProducerLayer::new( - self.genesis_config.l2_chain_id, - )); - - Ok(self) - } - fn add_da_client_layer(mut self) -> anyhow::Result<Self> { let Some(da_client_config) = self.configs.da_client_config.clone() else { tracing::warn!("No config for DA client, using the NoDA client"); @@ -727,9 +719,6 @@ impl MainNodeBuilder { Component::EthTxManager => { self = self.add_eth_tx_manager_layer()?; } - Component::TeeVerifierInputProducer => { - self = self.add_tee_verifier_input_producer_layer()?; - } Component::Housekeeper => { self = self .add_house_keeper_layer()? diff --git a/core/bin/zksync_tee_prover/src/api_client.rs b/core/bin/zksync_tee_prover/src/api_client.rs index 13fbc1ba8868..ffc2839b8d3b 100644 --- a/core/bin/zksync_tee_prover/src/api_client.rs +++ b/core/bin/zksync_tee_prover/src/api_client.rs @@ -1,13 +1,10 @@ -use reqwest::Client; +use reqwest::{Client, Response, StatusCode}; use secp256k1::{ecdsa::Signature, PublicKey}; -use serde::{de::DeserializeOwned, Serialize}; +use serde::Serialize; use url::Url; use zksync_basic_types::H256; use zksync_prover_interface::{ - api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitTeeProofRequest, - SubmitTeeProofResponse, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, - }, + api::{RegisterTeeAttestationRequest, SubmitTeeProofRequest, TeeProofGenerationDataRequest}, inputs::TeeVerifierInput, outputs::L1BatchTeeProofForL1, }; @@ -31,10 +28,9 @@ impl TeeApiClient { } } - async fn post<Req, Resp, S>(&self, endpoint: S, request: Req) -> Result<Resp, TeeProverError> + async fn post<Req, S>(&self, endpoint: S, request: Req) -> Result<Response, TeeProverError> where Req: Serialize + std::fmt::Debug, - Resp: DeserializeOwned, S: AsRef<str>, { let url = self.api_base_url.join(endpoint.as_ref()).unwrap(); @@ -46,9 +42,7 @@ impl TeeApiClient { .json(&request) .send() .await? - .error_for_status()? - .json::<Resp>() - .await + .error_for_status() } /// Registers the attestation quote with the TEE prover interface API, effectively proving that @@ -63,8 +57,7 @@ impl TeeApiClient { attestation: attestation_quote_bytes, pubkey: public_key.serialize().to_vec(), }; - self.post::<_, RegisterTeeAttestationResponse, _>("/tee/register_attestation", request) - .await?; + self.post("/tee/register_attestation", request).await?; tracing::info!( "Attestation quote was successfully registered for the public key {}", public_key @@ -77,12 +70,17 @@ impl TeeApiClient { pub async fn get_job( &self, tee_type: TeeType, - ) -> Result<Option<Box<TeeVerifierInput>>, TeeProverError> { + ) -> Result<Option<TeeVerifierInput>, TeeProverError> { let request = TeeProofGenerationDataRequest { tee_type }; - let response = self - .post::<_, TeeProofGenerationDataResponse, _>("/tee/proof_inputs", request) - .await?; - Ok(response.0) + let response = self.post("/tee/proof_inputs", request).await?; + match response.status() { + StatusCode::OK => Ok(Some(response.json::<TeeVerifierInput>().await?)), + StatusCode::NO_CONTENT => Ok(None), + _ => response + .json::<Option<TeeVerifierInput>>() + .await + .map_err(TeeProverError::Request), + } } /// Submits the successfully verified proof to the TEE prover interface API. @@ -101,7 +99,7 @@ impl TeeApiClient { tee_type, })); let observer = METRICS.proof_submitting_time.start(); - self.post::<_, SubmitTeeProofResponse, _>( + self.post( format!("/tee/submit_proofs/{batch_number}").as_str(), request, ) diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 1511f0c88e3d..bb7176644e63 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -90,9 +90,9 @@ impl TeeProver { } async fn step(&self, public_key: &PublicKey) -> Result<Option<L1BatchNumber>, TeeProverError> { - match self.api_client.get_job(self.config.tee_type).await? { - Some(job) => { - let (signature, batch_number, root_hash) = self.verify(*job)?; + match self.api_client.get_job(self.config.tee_type).await { + Ok(Some(job)) => { + let (signature, batch_number, root_hash) = self.verify(job)?; self.api_client .submit_proof( batch_number, @@ -104,10 +104,11 @@ impl TeeProver { .await?; Ok(Some(batch_number)) } - None => { + Ok(None) => { tracing::trace!("There are currently no pending batches to be proven"); Ok(None) } + Err(err) => Err(err), } } } diff --git a/core/lib/config/src/configs/prover_autoscaler.rs b/core/lib/config/src/configs/prover_autoscaler.rs index 41131fc1b8c7..6f83f0d2d18b 100644 --- a/core/lib/config/src/configs/prover_autoscaler.rs +++ b/core/lib/config/src/configs/prover_autoscaler.rs @@ -51,6 +51,8 @@ pub struct ProverAutoscalerScalerConfig { pub cluster_priorities: HashMap<String, u32>, /// Prover speed per GPU. Used to calculate desired number of provers for queue size. pub prover_speed: HashMap<Gpu, u32>, + /// Maximum number of provers which can be run per cluster/GPU. + pub max_provers: HashMap<String, HashMap<Gpu, u32>>, /// Duration after which pending pod considered long pending. 
#[serde(default = "ProverAutoscalerScalerConfig::default_long_pending_duration")] pub long_pending_duration: Duration, diff --git a/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json b/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json new file mode 100644 index 000000000000..189e28f565d4 --- /dev/null +++ b/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version,\n index_in_block,\n miniblocks.number AS \"miniblock_number!\",\n miniblocks.hash AS \"miniblocks_hash!\"\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "index_in_block", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "miniblock_number!", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "miniblocks_hash!", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + true, + true, + false, + false + ] + }, + "hash": "0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69" +} diff --git a/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json b/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json deleted file mode 100644 index 05b94ad249ac..000000000000 --- a/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n input_blob_url = $4\n WHERE\n l1_batch_number = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Int8", - "Time", - "Text" - ] - }, - "nullable": [] - }, - "hash": "0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04" -} diff --git a/core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json b/core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json similarity index 50% rename from core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json rename to core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json index 06d3461c3fa3..4f44879b6ec3 100644 --- a/core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json +++ b/core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json @@ -1,12 +1,17 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n protocol_version,\n hash\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { "ordinal": 0, "name": "protocol_version", "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "hash", + "type_info": "Bytea" } ], "parameters": { @@ -15,8 +20,9 @@ ] }, "nullable": [ - true + true, + false ] }, - "hash": 
"894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2" + "hash": "2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850" } diff --git a/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json b/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json deleted file mode 100644 index 9d8cc36189fc..000000000000 --- a/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $1\n AND proofs.status = $2\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Text" - ] - }, - "nullable": [ - false - ] - }, - "hash": "2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d" -} diff --git a/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json b/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json deleted file mode 100644 index a273eb249a4e..000000000000 --- a/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM tee_verifier_input_producer_jobs\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [] - }, - "nullable": [] - }, - "hash": "2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d" -} diff --git a/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json b/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json deleted file mode 100644 index 6012c6326515..000000000000 --- a/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n error = $4\n WHERE\n l1_batch_number = $2\n AND status != $5\n RETURNING\n tee_verifier_input_producer_jobs.attempts\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Int8", - "Time", - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - } - ] - }, - "nullable": [ - false - ] - }, - "hash": "3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79" -} diff --git a/core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json b/core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json new file mode 100644 
index 000000000000..4d006b6d1d5d --- /dev/null +++ b/core/lib/dal/.sqlx/query-4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH upsert AS (\n SELECT\n p.l1_batch_number\n FROM\n proof_generation_details p\n LEFT JOIN\n l1_batches l1\n ON p.l1_batch_number = l1.number\n LEFT JOIN\n tee_proof_generation_details tee\n ON\n p.l1_batch_number = tee.l1_batch_number\n AND tee.tee_type = $1\n WHERE\n (\n p.l1_batch_number >= $5\n AND p.vm_run_data_blob_url IS NOT NULL\n AND p.proof_gen_data_blob_url IS NOT NULL\n AND l1.hash IS NOT NULL\n AND l1.aux_data_hash IS NOT NULL\n AND l1.meta_parameters_hash IS NOT NULL\n )\n AND (\n tee.l1_batch_number IS NULL\n OR (\n tee.status = $3\n OR (\n tee.status = $2\n AND tee.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n )\n FETCH FIRST ROW ONLY\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Interval", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "4498e1c1ff179eacd03bd9ec24a7dc982c8cfb0e2277aff8dfaa9654255451ac" +} diff --git a/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json b/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json deleted file mode 100644 index f34c4a548cb3..000000000000 --- a/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n l1_batch_number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78" -} diff --git a/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json b/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json deleted file mode 100644 index 01ede1d8643a..000000000000 --- a/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_verifier_input_producer_jobs (\n l1_batch_number, status, created_at, updated_at\n )\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - } - ] - }, - "nullable": [] - }, - "hash": "6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199" -} diff --git a/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json 
b/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json deleted file mode 100644 index 3b8accb4fda2..000000000000 --- a/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version,\n index_in_block\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "index_in_block", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Bytea" - ] - }, - "nullable": [ - true, - true - ] - }, - "hash": "96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d" -} diff --git a/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json b/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json deleted file mode 100644 index b17b58282110..000000000000 --- a/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = $1,\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $2\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $3\n AND (\n proofs.status = $4\n OR (\n proofs.status = $1\n AND proofs.prover_taken_at < NOW() - $5::INTERVAL\n )\n )\n AND proofs.l1_batch_number >= $6\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Text", - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Text", - "Interval", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0" -} diff --git a/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json b/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json deleted file mode 100644 index fa1a5d6741ad..000000000000 --- a/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n status = $2\n OR (\n status = $1\n AND processing_started_at < NOW() - $4::INTERVAL\n )\n OR (\n status = $3\n AND attempts < $5\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_verifier_input_producer_jobs.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], 
- "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Interval", - "Int2" - ] - }, - "nullable": [ - false - ] - }, - "hash": "d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860" -} diff --git a/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json b/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json new file mode 100644 index 000000000000..12e28266fbcc --- /dev/null +++ b/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n WHERE\n proofs.status = $1\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662" +} diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql new file mode 100644 index 000000000000..707ce306365c --- /dev/null +++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql @@ -0,0 +1,20 @@ +CREATE TABLE tee_verifier_input_producer_jobs ( + l1_batch_number BIGINT NOT NULL, + status TEXT NOT NULL, + signature BYTEA, + pubkey BYTEA, + proof BYTEA, + tee_type TEXT NOT NULL, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + prover_taken_at TIMESTAMP, + PRIMARY KEY (l1_batch_number, tee_type), + CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey FOREIGN KEY (l1_batch_number) REFERENCES tee_verifier_input_producer_jobs(l1_batch_number) ON DELETE CASCADE, + CONSTRAINT tee_proof_generation_details_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES tee_attestations(pubkey) ON DELETE SET NULL +); + +ALTER TABLE tee_proof_generation_details + ADD CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey + FOREIGN KEY (l1_batch_number) + REFERENCES tee_verifier_input_producer_jobs(l1_batch_number) + ON DELETE CASCADE; diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql new file mode 100644 index 000000000000..c2417ba86b3b --- /dev/null +++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE tee_proof_generation_details DROP CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey; + +DROP TABLE IF EXISTS tee_verifier_input_producer_jobs; diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 829e15b5710a..4cb577986380 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ 
b/core/lib/dal/src/blocks_web3_dal.rs @@ -5,6 +5,7 @@ use zksync_db_connection::{ use zksync_system_constants::EMPTY_UNCLES_HASH; use zksync_types::{ api, + debug_flat_call::CallTraceMeta, fee_model::BatchFeeInput, l2_to_l1_log::L2ToL1Log, web3::{BlockHeader, Bytes}, @@ -531,11 +532,12 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_traces_for_l2_block( &mut self, block_number: L2BlockNumber, - ) -> DalResult> { - let protocol_version = sqlx::query!( + ) -> DalResult> { + let row = sqlx::query!( r#" SELECT - protocol_version + protocol_version, + hash FROM miniblocks WHERE @@ -543,14 +545,20 @@ impl BlocksWeb3Dal<'_, '_> { "#, i64::from(block_number.0) ) - .try_map(|row| row.protocol_version.map(parse_protocol_version).transpose()) + .try_map(|row| { + row.protocol_version + .map(parse_protocol_version) + .transpose() + .map(|val| (val, H256::from_slice(&row.hash))) + }) .instrument("get_traces_for_l2_block#get_l2_block_protocol_version_id") .with_arg("l2_block_number", &block_number) .fetch_optional(self.storage) .await?; - let Some(protocol_version) = protocol_version else { + let Some((protocol_version, block_hash)) = row else { return Ok(Vec::new()); }; + let protocol_version = protocol_version.unwrap_or_else(ProtocolVersionId::last_potentially_undefined); @@ -577,9 +585,15 @@ impl BlocksWeb3Dal<'_, '_> { .await? .into_iter() .map(|call_trace| { - let hash = H256::from_slice(&call_trace.tx_hash); + let tx_hash = H256::from_slice(&call_trace.tx_hash); let index = call_trace.tx_index_in_block.unwrap_or_default() as usize; - (call_trace.into_call(protocol_version), hash, index) + let meta = CallTraceMeta { + index_in_block: index, + tx_hash, + block_number: block_number.0, + block_hash, + }; + (call_trace.into_call(protocol_version), meta) }) .collect()) } @@ -1105,9 +1119,9 @@ mod tests { .await .unwrap(); assert_eq!(traces.len(), 2); - for ((trace, hash, _index), tx_result) in traces.iter().zip(&tx_results) { + for ((trace, meta), tx_result) in traces.iter().zip(&tx_results) { let expected_trace = tx_result.call_trace().unwrap(); - assert_eq!(&tx_result.hash, hash); + assert_eq!(tx_result.hash, meta.tx_hash); assert_eq!(*trace, expected_trace); } } diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index f0d2f0c16711..fbe225beb902 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -23,8 +23,7 @@ use crate::{ snapshots_dal::SnapshotsDal, storage_logs_dal::StorageLogsDal, storage_logs_dedup_dal::StorageLogsDedupDal, storage_web3_dal::StorageWeb3Dal, sync_dal::SyncDal, system_dal::SystemDal, tee_proof_generation_dal::TeeProofGenerationDal, - tee_verifier_input_producer_dal::TeeVerifierInputProducerDal, tokens_dal::TokensDal, - tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal, + tokens_dal::TokensDal, tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal, transactions_web3_dal::TransactionsWeb3Dal, vm_runner_dal::VmRunnerDal, }; @@ -56,7 +55,6 @@ pub mod storage_web3_dal; pub mod sync_dal; pub mod system_dal; pub mod tee_proof_generation_dal; -pub mod tee_verifier_input_producer_dal; pub mod tokens_dal; pub mod tokens_web3_dal; pub mod transactions_dal; @@ -81,8 +79,6 @@ where fn transactions_web3_dal(&mut self) -> TransactionsWeb3Dal<'_, 'a>; - fn tee_verifier_input_producer_dal(&mut self) -> TeeVerifierInputProducerDal<'_, 'a>; - fn blocks_dal(&mut self) -> BlocksDal<'_, 'a>; fn blocks_web3_dal(&mut self) -> BlocksWeb3Dal<'_, 'a>; @@ -155,10 +151,6 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { TransactionsWeb3Dal { storage: 
self } } - fn tee_verifier_input_producer_dal(&mut self) -> TeeVerifierInputProducerDal<'_, 'a> { - TeeVerifierInputProducerDal { storage: self } - } - fn blocks_dal(&mut self) -> BlocksDal<'_, 'a> { BlocksDal { storage: self } } diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index db56b9d0e3e7..d865212f190c 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -10,10 +10,7 @@ use zksync_db_connection::{ }; use zksync_types::{tee_types::TeeType, L1BatchNumber}; -use crate::{ - models::storage_tee_proof::StorageTeeProof, - tee_verifier_input_producer_dal::TeeVerifierInputProducerJobStatus, Core, -}; +use crate::{models::storage_tee_proof::StorageTeeProof, Core}; #[derive(Debug)] pub struct TeeProofGenerationDal<'a, 'c> { @@ -39,61 +36,78 @@ impl TeeProofGenerationDal<'_, '_> { ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); - let query = sqlx::query!( + sqlx::query!( r#" - UPDATE tee_proof_generation_details - SET - status = $1, - updated_at = NOW(), - prover_taken_at = NOW() - WHERE - tee_type = $2 - AND l1_batch_number = ( - SELECT - proofs.l1_batch_number - FROM - tee_proof_generation_details AS proofs - JOIN - tee_verifier_input_producer_jobs AS inputs - ON proofs.l1_batch_number = inputs.l1_batch_number - WHERE - inputs.status = $3 - AND ( - proofs.status = $4 + WITH upsert AS ( + SELECT + p.l1_batch_number + FROM + proof_generation_details p + LEFT JOIN + l1_batches l1 + ON p.l1_batch_number = l1.number + LEFT JOIN + tee_proof_generation_details tee + ON + p.l1_batch_number = tee.l1_batch_number + AND tee.tee_type = $1 + WHERE + ( + p.l1_batch_number >= $5 + AND p.vm_run_data_blob_url IS NOT NULL + AND p.proof_gen_data_blob_url IS NOT NULL + AND l1.hash IS NOT NULL + AND l1.aux_data_hash IS NOT NULL + AND l1.meta_parameters_hash IS NOT NULL + ) + AND ( + tee.l1_batch_number IS NULL + OR ( + tee.status = $3 OR ( - proofs.status = $1 - AND proofs.prover_taken_at < NOW() - $5::INTERVAL + tee.status = $2 + AND tee.prover_taken_at < NOW() - $4::INTERVAL ) ) - AND proofs.l1_batch_number >= $6 - ORDER BY - l1_batch_number ASC - LIMIT - 1 - FOR UPDATE - SKIP LOCKED - ) + ) + FETCH FIRST ROW ONLY + ) + + INSERT INTO + tee_proof_generation_details ( + l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at + ) + SELECT + l1_batch_number, + $1, + $2, + NOW(), + NOW(), + NOW() + FROM + upsert + ON CONFLICT (l1_batch_number, tee_type) DO + UPDATE + SET + status = $2, + updated_at = NOW(), + prover_taken_at = NOW() RETURNING - tee_proof_generation_details.l1_batch_number + l1_batch_number "#, - TeeProofGenerationJobStatus::PickedByProver.to_string(), tee_type.to_string(), - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, + TeeProofGenerationJobStatus::PickedByProver.to_string(), TeeProofGenerationJobStatus::Unpicked.to_string(), processing_timeout, min_batch_number - ); - - let batch_number = Instrumented::new("lock_batch_for_proving") - .with_arg("tee_type", &tee_type) - .with_arg("processing_timeout", &processing_timeout) - .with_arg("l1_batch_number", &min_batch_number) - .with(query) - .fetch_optional(self.storage) - .await? 
- .map(|row| L1BatchNumber(row.l1_batch_number as u32)); - - Ok(batch_number) + ) + .instrument("lock_batch_for_proving") + .with_arg("tee_type", &tee_type) + .with_arg("processing_timeout", &processing_timeout) + .with_arg("l1_batch_number", &min_batch_number) + .fetch_optional(self.storage) + .await + .map(|record| record.map(|record| L1BatchNumber(record.l1_batch_number as u32))) } pub async fn unlock_batch( @@ -176,38 +190,6 @@ impl TeeProofGenerationDal<'_, '_> { Ok(()) } - pub async fn insert_tee_proof_generation_job( - &mut self, - batch_number: L1BatchNumber, - tee_type: TeeType, - ) -> DalResult<()> { - let batch_number = i64::from(batch_number.0); - let query = sqlx::query!( - r#" - INSERT INTO - tee_proof_generation_details ( - l1_batch_number, tee_type, status, created_at, updated_at - ) - VALUES - ($1, $2, $3, NOW(), NOW()) - ON CONFLICT (l1_batch_number, tee_type) DO NOTHING - "#, - batch_number, - tee_type.to_string(), - TeeProofGenerationJobStatus::Unpicked.to_string(), - ); - let instrumentation = Instrumented::new("insert_tee_proof_generation_job") - .with_arg("l1_batch_number", &batch_number) - .with_arg("tee_type", &tee_type); - instrumentation - .clone() - .with(query) - .execute(self.storage) - .await?; - - Ok(()) - } - pub async fn save_attestation(&mut self, pubkey: &[u8], attestation: &[u8]) -> DalResult<()> { let query = sqlx::query!( r#" @@ -271,6 +253,40 @@ impl TeeProofGenerationDal<'_, '_> { Ok(proofs) } + /// For testing purposes only. + pub async fn insert_tee_proof_generation_job( + &mut self, + batch_number: L1BatchNumber, + tee_type: TeeType, + ) -> DalResult<()> { + let batch_number = i64::from(batch_number.0); + let query = sqlx::query!( + r#" + INSERT INTO + tee_proof_generation_details ( + l1_batch_number, tee_type, status, created_at, updated_at + ) + VALUES + ($1, $2, $3, NOW(), NOW()) + ON CONFLICT (l1_batch_number, tee_type) DO NOTHING + "#, + batch_number, + tee_type.to_string(), + TeeProofGenerationJobStatus::Unpicked.to_string(), + ); + let instrumentation = Instrumented::new("insert_tee_proof_generation_job") + .with_arg("l1_batch_number", &batch_number) + .with_arg("tee_type", &tee_type); + instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + + Ok(()) + } + + /// For testing purposes only. 
pub async fn get_oldest_unpicked_batch(&mut self) -> DalResult> { let query = sqlx::query!( r#" @@ -278,18 +294,13 @@ impl TeeProofGenerationDal<'_, '_> { proofs.l1_batch_number FROM tee_proof_generation_details AS proofs - JOIN - tee_verifier_input_producer_jobs AS inputs - ON proofs.l1_batch_number = inputs.l1_batch_number WHERE - inputs.status = $1 - AND proofs.status = $2 + proofs.status = $1 ORDER BY proofs.l1_batch_number ASC LIMIT 1 "#, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, TeeProofGenerationJobStatus::Unpicked.to_string(), ); let batch_number = Instrumented::new("get_oldest_unpicked_batch") diff --git a/core/lib/dal/src/tee_verifier_input_producer_dal.rs b/core/lib/dal/src/tee_verifier_input_producer_dal.rs deleted file mode 100644 index dddb451a2d7d..000000000000 --- a/core/lib/dal/src/tee_verifier_input_producer_dal.rs +++ /dev/null @@ -1,234 +0,0 @@ -use std::time::{Duration, Instant}; - -use sqlx::postgres::types::PgInterval; -use zksync_db_connection::{ - connection::Connection, - error::DalResult, - instrument::InstrumentExt, - utils::{duration_to_naive_time, pg_interval_from_duration}, -}; -use zksync_types::L1BatchNumber; - -use crate::Core; - -#[derive(Debug)] -pub struct TeeVerifierInputProducerDal<'a, 'c> { - pub(crate) storage: &'a mut Connection<'c, Core>, -} - -/// The amount of attempts to process a job before giving up. -pub const JOB_MAX_ATTEMPT: i16 = 5; - -/// Time to wait for job to be processed -const JOB_PROCESSING_TIMEOUT: PgInterval = pg_interval_from_duration(Duration::from_secs(10 * 60)); - -/// Status of a job that the producer will work on. - -#[derive(Debug, sqlx::Type)] -#[sqlx(type_name = "tee_verifier_input_producer_job_status")] -pub enum TeeVerifierInputProducerJobStatus { - /// When the job is queued. Metadata calculator creates the job and marks it as queued. - Queued, - /// The job is not going to be processed. This state is designed for manual operations on DB. - /// It is expected to be used if some jobs should be skipped like: - /// - testing purposes (want to check a specific L1 Batch, I can mark everything before it skipped) - /// - trim down costs on some environments (if I've done breaking changes, - /// makes no sense to wait for everything to be processed, I can just skip them and save resources) - ManuallySkipped, - /// Currently being processed by one of the jobs. Transitory state, will transition to either - /// [`TeeVerifierInputProducerStatus::Successful`] or [`TeeVerifierInputProducerStatus::Failed`]. - InProgress, - /// The final (happy case) state we expect all jobs to end up. After the run is complete, - /// the job uploaded it's inputs, it lands in successful. - Successful, - /// The job failed for reasons. It will be marked as such and the error persisted in DB. - /// If it failed less than MAX_ATTEMPTs, the job will be retried, - /// otherwise it will stay in this state as final state. 
- Failed, -} - -impl TeeVerifierInputProducerDal<'_, '_> { - pub async fn create_tee_verifier_input_producer_job( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> DalResult<()> { - sqlx::query!( - r#" - INSERT INTO - tee_verifier_input_producer_jobs ( - l1_batch_number, status, created_at, updated_at - ) - VALUES - ($1, $2, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING - "#, - i64::from(l1_batch_number.0), - TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, - ) - .instrument("create_tee_verifier_input_producer_job") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - pub async fn get_next_tee_verifier_input_producer_job( - &mut self, - ) -> DalResult> { - let l1_batch_number = sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - attempts = attempts + 1, - updated_at = NOW(), - processing_started_at = NOW() - WHERE - l1_batch_number = ( - SELECT - l1_batch_number - FROM - tee_verifier_input_producer_jobs - WHERE - status = $2 - OR ( - status = $1 - AND processing_started_at < NOW() - $4::INTERVAL - ) - OR ( - status = $3 - AND attempts < $5 - ) - ORDER BY - l1_batch_number ASC - LIMIT - 1 - FOR UPDATE - SKIP LOCKED - ) - RETURNING - tee_verifier_input_producer_jobs.l1_batch_number - "#, - TeeVerifierInputProducerJobStatus::InProgress as TeeVerifierInputProducerJobStatus, - TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, - TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus, - &JOB_PROCESSING_TIMEOUT, - JOB_MAX_ATTEMPT, - ) - .instrument("get_next_tee_verifier_input_producer_job") - .report_latency() - .fetch_optional(self.storage) - .await? - .map(|job| L1BatchNumber(job.l1_batch_number as u32)); - - Ok(l1_batch_number) - } - - pub async fn get_tee_verifier_input_producer_job_attempts( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> DalResult> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - tee_verifier_input_producer_jobs - WHERE - l1_batch_number = $1 - "#, - i64::from(l1_batch_number.0), - ) - .instrument("get_tee_verifier_input_producer_job_attempts") - .with_arg("l1_batch_number", &l1_batch_number) - .fetch_optional(self.storage) - .await? 
- .map(|job| job.attempts as u32); - - Ok(attempts) - } - - pub async fn mark_job_as_successful( - &mut self, - l1_batch_number: L1BatchNumber, - started_at: Instant, - object_path: &str, - ) -> DalResult<()> { - sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - updated_at = NOW(), - time_taken = $3, - input_blob_url = $4 - WHERE - l1_batch_number = $2 - "#, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, - i64::from(l1_batch_number.0), - duration_to_naive_time(started_at.elapsed()), - object_path, - ) - .instrument("mark_job_as_successful") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - pub async fn mark_job_as_failed( - &mut self, - l1_batch_number: L1BatchNumber, - started_at: Instant, - error: String, - ) -> DalResult> { - let attempts = sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - updated_at = NOW(), - time_taken = $3, - error = $4 - WHERE - l1_batch_number = $2 - AND status != $5 - RETURNING - tee_verifier_input_producer_jobs.attempts - "#, - TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus, - i64::from(l1_batch_number.0), - duration_to_naive_time(started_at.elapsed()), - error, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, - ) - .instrument("mark_job_as_failed") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .fetch_optional(self.storage) - .await? - .map(|job| job.attempts as u32); - - Ok(attempts) - } -} - -/// These functions should only be used for tests. -impl TeeVerifierInputProducerDal<'_, '_> { - pub async fn delete_all_jobs(&mut self) -> DalResult<()> { - sqlx::query!( - r#" - DELETE FROM tee_verifier_input_producer_jobs - "# - ) - .instrument("delete_all_tee_verifier_jobs") - .execute(self.storage) - .await?; - Ok(()) - } -} diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 67c965312bd4..5314e9799b33 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -10,9 +10,10 @@ use zksync_db_connection::{ utils::pg_interval_from_duration, }; use zksync_types::{ - block::L2BlockExecutionData, l1::L1Tx, l2::L2Tx, protocol_upgrade::ProtocolUpgradeTx, Address, - ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, L2BlockNumber, PriorityOpId, - ProtocolVersionId, Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, + block::L2BlockExecutionData, debug_flat_call::CallTraceMeta, l1::L1Tx, l2::L2Tx, + protocol_upgrade::ProtocolUpgradeTx, Address, ExecuteTransactionCommon, L1BatchNumber, + L1BlockNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, Transaction, H256, + PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::u256_to_big_decimal; use zksync_vm_interface::{ @@ -2131,12 +2132,17 @@ impl TransactionsDal<'_, '_> { Ok(data) } - pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult> { + pub async fn get_call_trace( + &mut self, + tx_hash: H256, + ) -> DalResult> { let row = sqlx::query!( r#" SELECT protocol_version, - index_in_block + index_in_block, + miniblocks.number AS "miniblock_number!", + miniblocks.hash AS "miniblocks_hash!" 
FROM transactions INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number @@ -2177,7 +2183,12 @@ impl TransactionsDal<'_, '_> { .map(|call_trace| { ( parse_call_trace(&call_trace.call_trace, protocol_version), - row.index_in_block.unwrap_or_default() as usize, + CallTraceMeta { + index_in_block: row.index_in_block.unwrap_or_default() as usize, + tx_hash, + block_number: row.miniblock_number as u32, + block_hash: H256::from_slice(&row.miniblocks_hash), + }, ) })) } diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs index 308cd65427fb..3484f2dad347 100644 --- a/core/lib/object_store/src/file.rs +++ b/core/lib/object_store/src/file.rs @@ -42,7 +42,6 @@ impl FileBackedObjectStore { Bucket::SchedulerWitnessJobsFri, Bucket::ProofsFri, Bucket::StorageSnapshot, - Bucket::TeeVerifierInput, Bucket::VmDumps, ] { let bucket_path = format!("{base_dir}/{bucket}"); diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index 740e8d76e246..0859d58d04be 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -19,7 +19,6 @@ pub enum Bucket { ProofsTee, StorageSnapshot, DataAvailability, - TeeVerifierInput, VmDumps, } @@ -39,7 +38,6 @@ impl Bucket { Self::ProofsTee => "proofs_tee", Self::StorageSnapshot => "storage_logs_snapshots", Self::DataAvailability => "data_availability", - Self::TeeVerifierInput => "tee_verifier_inputs", Self::VmDumps => "vm_dumps", } } diff --git a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto index e1d11b94d8f1..8363b6251199 100644 --- a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto +++ b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto @@ -34,6 +34,11 @@ message ProverSpeed { optional uint32 speed = 2; // required } +message MaxProver { + optional string cluster_and_gpu = 1; // required, format: / + optional uint32 max = 2; // required +} + message ProverAutoscalerScalerConfig { optional uint32 prometheus_port = 1; // required optional std.Duration scaler_run_interval = 2; // optional @@ -43,4 +48,5 @@ message ProverAutoscalerScalerConfig { repeated ClusterPriority cluster_priorities = 6; // optional repeated ProverSpeed prover_speed = 7; // optional optional uint32 long_pending_duration_s = 8; // optional + repeated MaxProver max_provers = 9; // optional } diff --git a/core/lib/protobuf_config/src/prover_autoscaler.rs b/core/lib/protobuf_config/src/prover_autoscaler.rs index f7da099cb829..e95e4003972e 100644 --- a/core/lib/protobuf_config/src/prover_autoscaler.rs +++ b/core/lib/protobuf_config/src/prover_autoscaler.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use anyhow::Context as _; use time::Duration; use zksync_config::configs::{self, prover_autoscaler::Gpu}; @@ -92,6 +94,15 @@ impl ProtoRepr for proto::ProverAutoscalerScalerConfig { Some(s) => Duration::seconds(s.into()), None => Self::Type::default_long_pending_duration(), }, + max_provers: self.max_provers.iter().fold(HashMap::new(), |mut acc, e| { + let (cluster_and_gpu, max) = e.read().expect("max_provers"); + if let Some((cluster, gpu)) = cluster_and_gpu.split_once('/') { + acc.entry(cluster.to_string()) + .or_default() + .insert(gpu.parse().expect("max_provers/gpu"), max); + } + acc + }), }) } @@ -117,6 +128,15 @@ impl ProtoRepr for proto::ProverAutoscalerScalerConfig { .map(|(k, v)| proto::ProverSpeed::build(&(*k, *v))) .collect(), long_pending_duration_s: 
Some(this.long_pending_duration.whole_seconds() as u32), + max_provers: this + .max_provers + .iter() + .flat_map(|(cluster, inner_map)| { + inner_map.iter().map(move |(gpu, max)| { + proto::MaxProver::build(&(format!("{}/{}", cluster, gpu), *max)) + }) + }) + .collect(), } } } @@ -170,3 +190,21 @@ impl ProtoRepr for proto::ProverSpeed { } } } + +impl ProtoRepr for proto::MaxProver { + type Type = (String, u32); + fn read(&self) -> anyhow::Result<Self::Type> { + Ok(( + required(&self.cluster_and_gpu) + .context("cluster_and_gpu")? + .parse()?, + *required(&self.max).context("max")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + cluster_and_gpu: Some(this.0.to_string()), + max: Some(this.1), + } + } +} diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index 776cd3141cbe..acf104cc4c61 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -31,7 +31,7 @@ pub enum ProofGenerationDataResponse { } #[derive(Debug, Serialize, Deserialize)] -pub struct TeeProofGenerationDataResponse(pub Option<Box<TeeVerifierInput>>); +pub struct TeeProofGenerationDataResponse(pub Box<TeeVerifierInput>); #[derive(Debug, Serialize, Deserialize)] pub enum SubmitProofResponse { diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 28bc1998312b..97de24f42dae 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -136,7 +136,7 @@ impl WitnessInputMerklePaths { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct VMRunWitnessInputData { pub l1_batch_number: L1BatchNumber, pub used_bytecodes: HashMap<U256, Vec<[u8; 32]>>, @@ -205,7 +205,7 @@ impl StoredObject for VMRunWitnessInputData { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct WitnessInputData { pub vm_run_data: VMRunWitnessInputData, pub merkle_paths: WitnessInputMerklePaths, @@ -254,7 +254,7 @@ impl StoredObject for WitnessInputData { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct L1BatchMetadataHashes { pub root_hash: H256, pub meta_hash: H256, @@ -264,27 +264,27 @@ pub struct L1BatchMetadataHashes { /// Version 1 of the data used as input for the TEE verifier. 
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct V1TeeVerifierInput { - pub witness_input_merkle_paths: WitnessInputMerklePaths, + pub vm_run_data: VMRunWitnessInputData, + pub merkle_paths: WitnessInputMerklePaths, pub l2_blocks_execution_data: Vec, pub l1_batch_env: L1BatchEnv, pub system_env: SystemEnv, - pub used_contracts: Vec<(H256, Vec)>, } impl V1TeeVerifierInput { pub fn new( - witness_input_merkle_paths: WitnessInputMerklePaths, + vm_run_data: VMRunWitnessInputData, + merkle_paths: WitnessInputMerklePaths, l2_blocks_execution_data: Vec, l1_batch_env: L1BatchEnv, system_env: SystemEnv, - used_contracts: Vec<(H256, Vec)>, ) -> Self { V1TeeVerifierInput { - witness_input_merkle_paths, + vm_run_data, + merkle_paths, l2_blocks_execution_data, l1_batch_env, system_env, - used_contracts, } } } @@ -305,17 +305,6 @@ impl TeeVerifierInput { } } -impl StoredObject for TeeVerifierInput { - const BUCKET: Bucket = Bucket::TeeVerifierInput; - type Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("tee_verifier_input_for_l1_batch_{key}.bin") - } - - serialize_using_bincode!(); -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/tee_verifier/Cargo.toml b/core/lib/tee_verifier/Cargo.toml index 6828eeef8b10..331c47e365eb 100644 --- a/core/lib/tee_verifier/Cargo.toml +++ b/core/lib/tee_verifier/Cargo.toml @@ -11,18 +11,21 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_multivm.workspace = true zksync_config.workspace = true zksync_crypto_primitives.workspace = true zksync_merkle_tree.workspace = true -zksync_object_store.workspace = true +zksync_multivm.workspace = true zksync_prover_interface.workspace = true zksync_types.workspace = true zksync_utils.workspace = true anyhow.workspace = true +once_cell.workspace = true serde.workspace = true tracing.workspace = true [dev-dependencies] zksync_contracts.workspace = true +zksync_prover_interface.workspace = true + +bincode.workspace = true diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 86b563f823e8..ffe3a548a02b 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -4,16 +4,14 @@ //! executing the VM and verifying all the accessed memory slots by their //! merkle path. -use std::{cell::RefCell, rc::Rc}; - -use anyhow::Context; +use anyhow::{bail, Context, Result}; use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ BlockOutputWithProofs, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, ValueHash, }; use zksync_multivm::{ interface::{ - storage::{InMemoryStorage, ReadStorage, StorageView}, + storage::{ReadStorage, StorageSnapshot, StorageView}, FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, @@ -23,8 +21,10 @@ use zksync_multivm::{ use zksync_prover_interface::inputs::{ StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths, }; -use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, StorageLog, Transaction, H256}; -use zksync_utils::bytecode::hash_bytecode; +use zksync_types::{ + block::L2BlockExecutionData, L1BatchNumber, StorageLog, StorageValue, Transaction, H256, +}; +use zksync_utils::u256_to_h256; /// A structure to hold the result of verification. pub struct VerificationResult { @@ -50,30 +50,48 @@ impl Verify for V1TeeVerifierInput { /// not actionable. 
fn verify(self) -> anyhow::Result<VerificationResult> { let old_root_hash = self.l1_batch_env.previous_batch_hash.unwrap(); - let l2_chain_id = self.system_env.chain_id; - let enumeration_index = self.witness_input_merkle_paths.next_enumeration_index(); + let enumeration_index = self.merkle_paths.next_enumeration_index(); + let batch_number = self.l1_batch_env.number; - let mut raw_storage = InMemoryStorage::with_custom_system_contracts_and_chain_id( - l2_chain_id, - hash_bytecode, - Vec::with_capacity(0), - ); + let read_storage_ops = self + .vm_run_data + .witness_block_state + .read_storage_key + .into_iter(); - for (hash, bytes) in self.used_contracts.into_iter() { - tracing::trace!("raw_storage.store_factory_dep({hash}, bytes)"); - raw_storage.store_factory_dep(hash, bytes) - } + let initial_writes_ops = self + .vm_run_data + .witness_block_state + .is_write_initial + .into_iter(); - let block_output_with_proofs = - get_bowp_and_set_initial_values(self.witness_input_merkle_paths, &mut raw_storage); + // We need to define storage slots read during batch execution, and their initial state; + // hence, the use of both read_storage_ops and initial_writes_ops. + // StorageSnapshot also requires providing enumeration indices, + // but they only matter at the end of execution when creating pubdata for the batch, + // which is irrelevant in this case. Thus, enumeration indices are set to dummy values. + let storage = read_storage_ops + .enumerate() + .map(|(i, (hash, bytes))| (hash.hashed_key(), Some((bytes, i as u64 + 1u64)))) + .chain(initial_writes_ops.filter_map(|(key, initial_write)| { + initial_write.then_some((key.hashed_key(), None)) + })) + .collect(); - let storage_view = Rc::new(RefCell::new(StorageView::new(&raw_storage))); + let factory_deps = self + .vm_run_data + .used_bytecodes + .into_iter() + .map(|(hash, bytes)| (u256_to_h256(hash), bytes.into_flattened())) + .collect(); - let batch_number = self.l1_batch_env.number; + let storage_snapshot = StorageSnapshot::new(storage, factory_deps); + let storage_view = StorageView::new(storage_snapshot).to_rc_ptr(); let vm = LegacyVmInstance::new(self.l1_batch_env, self.system_env, storage_view); - let vm_out = execute_vm(self.l2_blocks_execution_data, vm)?; + let block_output_with_proofs = get_bowp(self.merkle_paths)?; + let instructions: Vec<TreeInstruction> = generate_tree_instructions(enumeration_index, &block_output_with_proofs, vm_out)?; @@ -89,11 +107,8 @@ impl Verify for V1TeeVerifierInput { } /// Sets the initial storage values and returns `BlockOutputWithProofs` -fn get_bowp_and_set_initial_values( - witness_input_merkle_paths: WitnessInputMerklePaths, - raw_storage: &mut InMemoryStorage, -) -> BlockOutputWithProofs { - let logs = witness_input_merkle_paths +fn get_bowp(witness_input_merkle_paths: WitnessInputMerklePaths) -> Result<BlockOutputWithProofs> { + let logs_result: Result<_, _> = witness_input_merkle_paths .into_merkle_paths() .map( |StorageLogMetadata { @@ -110,29 +125,31 @@ fn get_bowp_and_set_initial_values( let merkle_path = merkle_paths.into_iter().map(|x| x.into()).collect(); let base: TreeLogEntry = match (is_write, first_write, leaf_enumeration_index) { (false, _, 0) => TreeLogEntry::ReadMissingKey, - (false, _, _) => { + (false, false, _) => { // This is a special U256 here, which needs `to_little_endian` let mut hashed_key = [0_u8; 32]; leaf_storage_key.to_little_endian(&mut hashed_key); - raw_storage.set_value_hashed_enum( - hashed_key.into(), - leaf_enumeration_index, - value_read.into(), + tracing::trace!( + "TreeLogEntry::Read {leaf_storage_key:x} = {:x}", +
StorageValue::from(value_read) ); TreeLogEntry::Read { leaf_index: leaf_enumeration_index, value: value_read.into(), } } + (false, true, _) => { + tracing::error!("get_bowp is_write = false, first_write = true"); + bail!("get_bowp is_write = false, first_write = true"); + } (true, true, _) => TreeLogEntry::Inserted, (true, false, _) => { // This is a special U256 here, which needs `to_little_endian` let mut hashed_key = [0_u8; 32]; leaf_storage_key.to_little_endian(&mut hashed_key); - raw_storage.set_value_hashed_enum( - hashed_key.into(), - leaf_enumeration_index, - value_read.into(), + tracing::trace!( + "TreeLogEntry::Updated {leaf_storage_key:x} = {:x}", + StorageValue::from(value_read) ); TreeLogEntry::Updated { leaf_index: leaf_enumeration_index, @@ -140,19 +157,21 @@ fn get_bowp_and_set_initial_values( } } }; - TreeLogEntryWithProof { + Ok(TreeLogEntryWithProof { base, merkle_path, root_hash, - } + }) }, ) .collect(); - BlockOutputWithProofs { + let logs: Vec<TreeLogEntryWithProof> = logs_result?; + + Ok(BlockOutputWithProofs { logs, leaf_count: 0, - } + }) } /// Executes the VM and returns `FinishedL1Batch` on success. @@ -176,11 +195,17 @@ fn execute_vm( .context("failed to execute transaction in TeeVerifierInputProducer")?; tracing::trace!("Finished execution of tx: {tx:?}"); } + + tracing::trace!("finished l2_block {l2_block_data:?}"); + tracing::trace!("about to vm.start_new_l2_block {next_l2_block_data:?}"); + vm.start_new_l2_block(L2BlockEnv::from_l2_block_data(next_l2_block_data)); tracing::trace!("Finished execution of l2_block: {:?}", l2_block_data.number); } + tracing::trace!("about to vm.finish_batch()"); + Ok(vm.finish_batch()) } @@ -191,7 +216,7 @@ fn map_log_tree( idx: &mut u64, ) -> anyhow::Result<TreeInstruction> { let key = storage_log.key.hashed_key_u256(); - Ok(match (storage_log.is_write(), *tree_log_entry) { + let tree_instruction = match (storage_log.is_write(), *tree_log_entry) { (true, TreeLogEntry::Updated { leaf_index, .. }) => { TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) } @@ -203,24 +228,31 @@ fn map_log_tree( (false, TreeLogEntry::Read { value, .. }) => { if storage_log.value != value { tracing::error!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", - storage_log.value, - value - ); - anyhow::bail!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction: read value {:#?} != {:#?}", storage_log.value, value ); + anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } TreeInstruction::Read(key) } (false, TreeLogEntry::ReadMissingKey { .. }) => TreeInstruction::Read(key), - _ => { - tracing::error!("Failed to map LogQuery to TreeInstruction"); + (true, TreeLogEntry::Read { .. }) + | (true, TreeLogEntry::ReadMissingKey) + | (false, TreeLogEntry::Inserted) + | (false, TreeLogEntry::Updated { .. }) => { + tracing::error!( + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction" + ); anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } - }) + }; + + Ok(tree_instruction) } /// Generates the `TreeInstruction`s from the VM executions.
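A minimal, self-contained sketch of the collect-into-`Result` pattern that the rewritten `get_bowp` above relies on: each merkle-path entry maps to a `Result`, and collecting the iterator short-circuits on the first inconsistent entry (a read flagged as a first write) via `bail!`. The names below are illustrative only, not taken from the patch, and assume only the `anyhow` crate.

    use anyhow::{bail, Result};

    // Toy stand-in for one merkle-path entry: (is_write, first_write, value).
    fn to_entry(meta: &(bool, bool, u64)) -> Result<String> {
        match meta {
            // A read that claims to be an initial write is inconsistent input.
            (false, true, _) => bail!("read entry marked as first write"),
            (false, _, value) => Ok(format!("read {value}")),
            (true, _, value) => Ok(format!("write {value}")),
        }
    }

    fn main() -> Result<()> {
        let metas = vec![(false, false, 1), (true, true, 2)];
        // Collecting `Result` items short-circuits on the first `Err`.
        let entries: Result<Vec<String>> = metas.iter().map(to_entry).collect();
        println!("{:?}", entries?);
        Ok(())
    }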
@@ -269,8 +301,7 @@ fn execute_tx( mod tests { use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv, TxExecutionMode}; - use zksync_object_store::StoredObject; - use zksync_prover_interface::inputs::TeeVerifierInput; + use zksync_prover_interface::inputs::{TeeVerifierInput, VMRunWitnessInputData}; use zksync_types::U256; use super::*; @@ -278,6 +309,18 @@ mod tests { #[test] fn test_v1_serialization() { let tvi = V1TeeVerifierInput::new( + VMRunWitnessInputData { + l1_batch_number: Default::default(), + used_bytecodes: Default::default(), + initial_heap_content: vec![], + protocol_version: Default::default(), + bootloader_code: vec![], + default_account_code_hash: Default::default(), + evm_emulator_code_hash: Some(Default::default()), + storage_refunds: vec![], + pubdata_costs: vec![], + witness_block_state: Default::default(), + }, WitnessInputMerklePaths::new(0), vec![], L1BatchEnv { @@ -313,14 +356,11 @@ mod tests { default_validation_computational_gas_limit: 0, chain_id: Default::default(), }, - vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], ); let tvi = TeeVerifierInput::new(tvi); - let serialized = <TeeVerifierInput as StoredObject>::serialize(&tvi) - .expect("Failed to serialize TeeVerifierInput."); + let serialized = bincode::serialize(&tvi).expect("Failed to serialize TeeVerifierInput."); let deserialized: TeeVerifierInput = - <TeeVerifierInput as StoredObject>::deserialize(serialized) - .expect("Failed to deserialize TeeVerifierInput."); + bincode::deserialize(&serialized).expect("Failed to deserialize TeeVerifierInput."); assert_eq!(tvi, deserialized); } diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index 103b6de1fb38..1c7672264cb4 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -14,8 +14,9 @@ pub use crate::transaction_request::{ Eip712Meta, SerializationTransactionError, TransactionRequest, }; use crate::{ - debug_flat_call::DebugCallFlat, protocol_version::L1VerifierConfig, Address, L2BlockNumber, - ProtocolVersionId, + debug_flat_call::{DebugCallFlat, ResultDebugCallFlat}, + protocol_version::L1VerifierConfig, + Address, L2BlockNumber, ProtocolVersionId, }; pub mod en; @@ -763,11 +764,11 @@ pub enum BlockStatus { #[serde(untagged)] pub enum CallTracerBlockResult { CallTrace(Vec<ResultDebugCall>), - FlatCallTrace(Vec<DebugCallFlat>), + FlatCallTrace(Vec<ResultDebugCallFlat>), } impl CallTracerBlockResult { - pub fn unwrap_flat(self) -> Vec<DebugCallFlat> { + pub fn unwrap_flat(self) -> Vec<ResultDebugCallFlat> { match self { Self::CallTrace(_) => panic!("Result is a FlatCallTrace"), Self::FlatCallTrace(trace) => trace, diff --git a/core/lib/types/src/debug_flat_call.rs b/core/lib/types/src/debug_flat_call.rs index 89a008b5fb5f..5809026e521c 100644 --- a/core/lib/types/src/debug_flat_call.rs +++ b/core/lib/types/src/debug_flat_call.rs @@ -3,6 +3,13 @@ use zksync_basic_types::{web3::Bytes, U256}; use crate::{api::DebugCallType, Address, H256}; +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ResultDebugCallFlat { + pub tx_hash: H256, + pub result: Vec<DebugCallFlat>, +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct DebugCallFlat { @@ -12,6 +19,8 @@ pub struct DebugCallFlat { pub trace_address: Vec<usize>, pub transaction_position: usize, pub transaction_hash: H256, + pub block_number: u32, + pub block_hash: H256, pub r#type: DebugCallType, } @@ -32,3 +41,11 @@ pub struct CallResult { pub output: Bytes, pub gas_used: U256, } + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct CallTraceMeta { + pub
index_in_block: usize, + pub tx_hash: H256, + pub block_number: u32, + pub block_hash: H256, +} diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs index bce9cc9034d7..7f3195af873f 100644 --- a/core/lib/types/src/storage/witness_block_state.rs +++ b/core/lib/types/src/storage/witness_block_state.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::{StorageKey, StorageValue}; /// Storage data used during Witness Generation. -#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct WitnessStorageState { pub read_storage_key: HashMap, pub is_write_initial: HashMap, diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 9d399bdd0aff..87fb7ea28f71 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -43,9 +43,6 @@ pub enum Component { EthTxManager, /// State keeper. StateKeeper, - /// Produces input for the TEE verifier. - /// The blob is later used as input for TEE verifier. - TeeVerifierInputProducer, /// Component for housekeeping task such as cleaning blobs from GCS, reporting metrics etc. Housekeeper, /// Component for exposing APIs to prover for providing proof generation data and accepting proofs. @@ -88,9 +85,6 @@ impl FromStr for Components { "tree_api" => Ok(Components(vec![Component::TreeApi])), "state_keeper" => Ok(Components(vec![Component::StateKeeper])), "housekeeper" => Ok(Components(vec![Component::Housekeeper])), - "tee_verifier_input_producer" => { - Ok(Components(vec![Component::TeeVerifierInputProducer])) - } "eth" => Ok(Components(vec![ Component::EthWatcher, Component::EthTxAggregator, diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index e296fe87faa2..726f35ac29a9 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -7,7 +7,7 @@ use zksync_types::{ BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, DebugCall, DebugCallType, ResultDebugCall, SupportedTracers, TracerConfig, }, - debug_flat_call::{Action, CallResult, DebugCallFlat}, + debug_flat_call::{Action, CallResult, CallTraceMeta, DebugCallFlat, ResultDebugCallFlat}, l2::L2Tx, transaction_request::CallRequest, web3, H256, U256, @@ -31,8 +31,7 @@ impl DebugNamespace { pub(crate) fn map_call( call: Call, - index: usize, - transaction_hash: H256, + meta: CallTraceMeta, tracer_option: TracerConfig, ) -> CallTracerResult { match tracer_option.tracer { @@ -42,14 +41,13 @@ impl DebugNamespace { )), SupportedTracers::FlatCallTracer => { let mut calls = vec![]; - let mut traces = vec![index]; + let mut traces = vec![meta.index_in_block]; Self::flatten_call( call, &mut calls, &mut traces, tracer_option.tracer_config.only_top_call, - index, - transaction_hash, + &meta, ); CallTracerResult::FlatCallTrace(calls) } @@ -89,8 +87,7 @@ impl DebugNamespace { calls: &mut Vec, trace_address: &mut Vec, only_top_call: bool, - transaction_position: usize, - transaction_hash: H256, + meta: &CallTraceMeta, ) { let subtraces = call.calls.len(); let debug_type = match call.r#type { @@ -120,22 +117,17 @@ impl DebugNamespace { result, subtraces, trace_address: trace_address.clone(), // Clone the current trace address - transaction_position, - transaction_hash, + transaction_position: meta.index_in_block, + transaction_hash: meta.tx_hash, + block_number: meta.block_number, + block_hash: 
meta.block_hash, r#type: DebugCallType::Call, }); if !only_top_call { for (number, call) in call.calls.into_iter().enumerate() { trace_address.push(number); - Self::flatten_call( - call, - calls, - trace_address, - false, - transaction_position, - transaction_hash, - ); + Self::flatten_call(call, calls, trace_address, false, meta); trace_address.pop(); } } @@ -158,6 +150,7 @@ impl DebugNamespace { let mut connection = self.state.acquire_connection().await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; + // let block_hash = block_hash self.state. self.current_method() .set_block_diff(self.state.last_sealed_l2_block.diff(block_number)); @@ -172,25 +165,31 @@ impl DebugNamespace { SupportedTracers::CallTracer => CallTracerBlockResult::CallTrace( call_traces .into_iter() - .map(|(call, _, _)| ResultDebugCall { + .map(|(call, _)| ResultDebugCall { result: Self::map_default_call(call, options.tracer_config.only_top_call), }) .collect(), ), SupportedTracers::FlatCallTracer => { - let mut flat_calls = vec![]; - for (call, tx_hash, tx_index) in call_traces { - let mut traces = vec![tx_index]; - Self::flatten_call( - call, - &mut flat_calls, - &mut traces, - options.tracer_config.only_top_call, - tx_index, - tx_hash, - ); - } - CallTracerBlockResult::FlatCallTrace(flat_calls) + let res = call_traces + .into_iter() + .map(|(call, meta)| { + let mut traces = vec![meta.index_in_block]; + let mut flat_calls = vec![]; + Self::flatten_call( + call, + &mut flat_calls, + &mut traces, + options.tracer_config.only_top_call, + &meta, + ); + ResultDebugCallFlat { + tx_hash: meta.tx_hash, + result: flat_calls, + } + }) + .collect(); + CallTracerBlockResult::FlatCallTrace(res) } }; Ok(result) @@ -207,13 +206,8 @@ impl DebugNamespace { .get_call_trace(tx_hash) .await .map_err(DalError::generalize)?; - Ok(call_trace.map(|(call_trace, index_in_block)| { - Self::map_call( - call_trace, - index_in_block, - tx_hash, - options.unwrap_or_default(), - ) + Ok(call_trace.map(|(call_trace, meta)| { + Self::map_call(call_trace, meta, options.unwrap_or_default()) })) } @@ -305,8 +299,6 @@ impl DebugNamespace { )) } }; - // It's a call request, it's safe to keep it zero - let hash = H256::zero(); let call = Call::new_high_level( call.common_data.fee.gas_limit.as_u64(), result.vm.statistics.gas_used, @@ -316,6 +308,12 @@ impl DebugNamespace { revert_reason, result.call_traces, ); - Ok(Self::map_call(call, 0, hash, options)) + let number = block_args.resolved_block_number(); + let meta = CallTraceMeta { + block_number: number.0, + // It's a call request, it's safe to everything as default + ..Default::default() + }; + Ok(Self::map_call(call, meta, options)) } } diff --git a/core/node/api_server/src/web3/tests/debug.rs b/core/node/api_server/src/web3/tests/debug.rs index 4f021b777aec..28a22511fa98 100644 --- a/core/node/api_server/src/web3/tests/debug.rs +++ b/core/node/api_server/src/web3/tests/debug.rs @@ -139,32 +139,27 @@ impl HttpTest for TraceBlockFlatTest { .await? .unwrap_flat(); - // A transaction with 2 nested calls will convert into 3 Flattened calls. 
- // Also in this test, all tx have the same # of nested calls - assert_eq!( - block_traces.len(), - tx_results.len() * (tx_results[0].call_traces.len() + 1) - ); + assert_eq!(block_traces.len(), tx_results.len()); + + let tx_traces = &block_traces.first().unwrap().result; // First tx has 2 nested calls, thus 2 sub-traces - assert_eq!(block_traces[0].subtraces, 2); - assert_eq!(block_traces[0].trace_address, [0]); + assert_eq!(tx_traces[0].subtraces, 2); + assert_eq!(tx_traces[0].trace_address, [0]); // Second flat-call (fist nested call) do not have nested calls - assert_eq!(block_traces[1].subtraces, 0); - assert_eq!(block_traces[1].trace_address, [0, 0]); + assert_eq!(tx_traces[1].subtraces, 0); + assert_eq!(tx_traces[1].trace_address, [0, 0]); - let top_level_call_indexes = [0, 3, 6]; + let top_level_call_indexes = [0, 1, 2]; let top_level_traces = top_level_call_indexes .iter() .map(|&i| block_traces[i].clone()); for (top_level_trace, tx_result) in top_level_traces.zip(&tx_results) { - assert_eq!(top_level_trace.action.from, Address::zero()); - assert_eq!(top_level_trace.action.to, BOOTLOADER_ADDRESS); - assert_eq!( - top_level_trace.action.gas, - tx_result.transaction.gas_limit() - ); + let trace = top_level_trace.result.first().unwrap(); + assert_eq!(trace.action.from, Address::zero()); + assert_eq!(trace.action.to, BOOTLOADER_ADDRESS); + assert_eq!(trace.action.gas, tx_result.transaction.gas_limit()); } // TODO: test inner calls } diff --git a/core/node/api_server/src/web3/tests/unstable.rs b/core/node/api_server/src/web3/tests/unstable.rs index 1d425f8b9515..e814081afa02 100644 --- a/core/node/api_server/src/web3/tests/unstable.rs +++ b/core/node/api_server/src/web3/tests/unstable.rs @@ -27,14 +27,9 @@ impl HttpTest for GetTeeProofsTest { assert!(proof.is_empty()); - let mut storage = pool.connection().await.unwrap(); - storage - .tee_verifier_input_producer_dal() - .create_tee_verifier_input_producer_job(batch_no) - .await?; - let pubkey = vec![0xDE, 0xAD, 0xBE, 0xEF]; let attestation = vec![0xC0, 0xFF, 0xEE]; + let mut storage = pool.connection().await.unwrap(); let mut tee_proof_generation_dal = storage.tee_proof_generation_dal(); tee_proof_generation_dal .save_attestation(&pubkey, &attestation) diff --git a/core/node/metadata_calculator/src/updater.rs b/core/node/metadata_calculator/src/updater.rs index e2acf62dea8a..17fd5d900eab 100644 --- a/core/node/metadata_calculator/src/updater.rs +++ b/core/node/metadata_calculator/src/updater.rs @@ -152,10 +152,6 @@ impl TreeUpdater { // right away without having to implement dedicated code. 
if let Some(object_key) = &object_key { - storage - .tee_verifier_input_producer_dal() - .create_tee_verifier_input_producer_job(l1_batch_number) - .await?; // Save the proof generation details to Postgres storage .proof_generation_dal() diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 1df47e775539..d85f3dc7c8e9 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -45,7 +45,6 @@ zksync_node_sync.workspace = true zksync_node_api_server.workspace = true zksync_node_consensus.workspace = true zksync_contract_verification_server.workspace = true -zksync_tee_verifier_input_producer.workspace = true zksync_queued_job_processor.workspace = true zksync_reorg_detector.workspace = true zksync_vm_runner.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 75828da19023..11a62c9333b2 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -33,7 +33,6 @@ pub mod reorg_detector; pub mod sigint; pub mod state_keeper; pub mod sync_state_updater; -pub mod tee_verifier_input_producer; pub mod tree_data_fetcher; pub mod validate_chain_ids; pub mod vm_runner; diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs index b53ff73c1a04..3e1269caa4e4 100644 --- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs +++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core}; use zksync_object_store::ObjectStore; -use zksync_types::commitment::L1BatchCommitmentMode; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; use crate::{ implementations::resources::{ @@ -21,6 +21,7 @@ use crate::{ pub struct ProofDataHandlerLayer { proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, } #[derive(Debug, FromContext)] @@ -41,10 +42,12 @@ impl ProofDataHandlerLayer { pub fn new( proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> Self { Self { proof_data_handler_config, commitment_mode, + l2_chain_id, } } } @@ -67,6 +70,7 @@ impl WiringLayer for ProofDataHandlerLayer { blob_store, main_pool, commitment_mode: self.commitment_mode, + l2_chain_id: self.l2_chain_id, }; Ok(Output { task }) @@ -79,6 +83,7 @@ pub struct ProofDataHandlerTask { blob_store: Arc, main_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, } #[async_trait::async_trait] @@ -93,6 +98,7 @@ impl Task for ProofDataHandlerTask { self.blob_store, self.main_pool, self.commitment_mode, + self.l2_chain_id, stop_receiver.0, ) .await diff --git a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs deleted file mode 100644 index 68789082a226..000000000000 --- a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_queued_job_processor::JobProcessor; -use zksync_tee_verifier_input_producer::TeeVerifierInputProducer; -use zksync_types::L2ChainId; - -use crate::{ - 
implementations::resources::{ - object_store::ObjectStoreResource, - pools::{MasterPool, PoolResource}, - }, - service::StopReceiver, - task::{Task, TaskId}, - wiring_layer::{WiringError, WiringLayer}, - FromContext, IntoContext, -}; - -/// Wiring layer for [`TeeVerifierInputProducer`]. -#[derive(Debug)] -pub struct TeeVerifierInputProducerLayer { - l2_chain_id: L2ChainId, -} - -impl TeeVerifierInputProducerLayer { - pub fn new(l2_chain_id: L2ChainId) -> Self { - Self { l2_chain_id } - } -} - -#[derive(Debug, FromContext)] -#[context(crate = crate)] -pub struct Input { - pub master_pool: PoolResource, - pub object_store: ObjectStoreResource, -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - #[context(task)] - pub task: TeeVerifierInputProducer, -} - -#[async_trait::async_trait] -impl WiringLayer for TeeVerifierInputProducerLayer { - type Input = Input; - type Output = Output; - - fn layer_name(&self) -> &'static str { - "tee_verifier_input_producer_layer" - } - - async fn wire(self, input: Self::Input) -> Result { - let pool = input.master_pool.get().await?; - let ObjectStoreResource(object_store) = input.object_store; - let task = TeeVerifierInputProducer::new(pool, object_store, self.l2_chain_id).await?; - - Ok(Output { task }) - } -} - -#[async_trait::async_trait] -impl Task for TeeVerifierInputProducer { - fn id(&self) -> TaskId { - "tee_verifier_input_producer".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0, None).await - } -} diff --git a/core/node/proof_data_handler/Cargo.toml b/core/node/proof_data_handler/Cargo.toml index 82063b23fdb5..76dc89eda04b 100644 --- a/core/node/proof_data_handler/Cargo.toml +++ b/core/node/proof_data_handler/Cargo.toml @@ -17,6 +17,8 @@ zksync_dal.workspace = true zksync_object_store.workspace = true zksync_prover_interface.workspace = true zksync_types.workspace = true +zksync_vm_executor.workspace = true +zksync_utils.workspace = true anyhow.workspace = true axum.workspace = true tokio.workspace = true diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs index 15ef393294aa..7d0e33ea0a3a 100644 --- a/core/node/proof_data_handler/src/errors.rs +++ b/core/node/proof_data_handler/src/errors.rs @@ -6,6 +6,7 @@ use zksync_dal::DalError; use zksync_object_store::ObjectStoreError; pub(crate) enum RequestProcessorError { + GeneralError(String), ObjectStore(ObjectStoreError), Dal(DalError), } @@ -19,24 +20,26 @@ impl From for RequestProcessorError { impl IntoResponse for RequestProcessorError { fn into_response(self) -> Response { let (status_code, message) = match self { - RequestProcessorError::ObjectStore(err) => { + Self::GeneralError(err) => { + tracing::error!("Error: {:?}", err); + ( + StatusCode::INTERNAL_SERVER_ERROR, + "An internal error occurred".to_owned(), + ) + } + Self::ObjectStore(err) => { tracing::error!("GCS error: {:?}", err); ( StatusCode::BAD_GATEWAY, "Failed fetching/saving from GCS".to_owned(), ) } - RequestProcessorError::Dal(err) => { + Self::Dal(err) => { tracing::error!("Sqlx error: {:?}", err); - match err.inner() { - zksync_dal::SqlxError::RowNotFound => { - (StatusCode::NOT_FOUND, "Non existing L1 batch".to_owned()) - } - _ => ( - StatusCode::BAD_GATEWAY, - "Failed fetching/saving from db".to_owned(), - ), - } + ( + StatusCode::BAD_GATEWAY, + "Failed fetching/saving from db".to_owned(), + ) } }; (status_code, message).into_response() diff --git 
a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index 51780f03230d..a482a7bc07b2 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -1,7 +1,7 @@ use std::{net::SocketAddr, sync::Arc}; use anyhow::Context as _; -use axum::{extract::Path, routing::post, Json, Router}; +use axum::{extract::Path, http::StatusCode, response::IntoResponse, routing::post, Json, Router}; use request_processor::RequestProcessor; use tee_request_processor::TeeRequestProcessor; use tokio::sync::watch; @@ -12,7 +12,7 @@ use zksync_prover_interface::api::{ ProofGenerationDataRequest, RegisterTeeAttestationRequest, SubmitProofRequest, SubmitTeeProofRequest, TeeProofGenerationDataRequest, }; -use zksync_types::commitment::L1BatchCommitmentMode; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; #[cfg(test)] mod tests; @@ -27,11 +27,18 @@ pub async fn run_server( blob_store: Arc, connection_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, mut stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); tracing::info!("Starting proof data handler server on {bind_address}"); - let app = create_proof_processing_router(blob_store, connection_pool, config, commitment_mode); + let app = create_proof_processing_router( + blob_store, + connection_pool, + config, + commitment_mode, + l2_chain_id, + ); let listener = tokio::net::TcpListener::bind(bind_address) .await @@ -54,6 +61,7 @@ fn create_proof_processing_router( connection_pool: ConnectionPool, config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> Router { let get_proof_gen_processor = RequestProcessor::new( blob_store.clone(), @@ -88,7 +96,7 @@ fn create_proof_processing_router( if config.tee_support { let get_tee_proof_gen_processor = - TeeRequestProcessor::new(blob_store, connection_pool, config.clone()); + TeeRequestProcessor::new(blob_store, connection_pool, config.clone(), l2_chain_id); let submit_tee_proof_processor = get_tee_proof_gen_processor.clone(); let register_tee_attestation_processor = get_tee_proof_gen_processor.clone(); @@ -96,9 +104,15 @@ fn create_proof_processing_router( "/tee/proof_inputs", post( move |payload: Json| async move { - get_tee_proof_gen_processor + let result = get_tee_proof_gen_processor .get_proof_generation_data(payload) - .await + .await; + + match result { + Ok(Some(data)) => (StatusCode::OK, data).into_response(), + Ok(None) => { StatusCode::NO_CONTENT.into_response()}, + Err(e) => e.into_response(), + } }, ), ) diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 4ae1a5026f14..2c2a56300097 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -4,11 +4,17 @@ use axum::{extract::Path, Json}; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_object_store::{ObjectStore, ObjectStoreError}; -use zksync_prover_interface::api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, - SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, +use zksync_prover_interface::{ + api::{ + RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, + SubmitTeeProofRequest, 
TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, + }, + inputs::{ + TeeVerifierInput, V1TeeVerifierInput, VMRunWitnessInputData, WitnessInputMerklePaths, + }, }; -use zksync_types::{tee_types::TeeType, L1BatchNumber}; +use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; +use zksync_vm_executor::storage::L1BatchParamsProvider; use crate::errors::RequestProcessorError; @@ -17,6 +23,7 @@ pub(crate) struct TeeRequestProcessor { blob_store: Arc, pool: ConnectionPool, config: ProofDataHandlerConfig, + l2_chain_id: L2ChainId, } impl TeeRequestProcessor { @@ -24,35 +31,42 @@ impl TeeRequestProcessor { blob_store: Arc, pool: ConnectionPool, config: ProofDataHandlerConfig, + l2_chain_id: L2ChainId, ) -> Self { Self { blob_store, pool, config, + l2_chain_id, } } pub(crate) async fn get_proof_generation_data( &self, request: Json, - ) -> Result, RequestProcessorError> { + ) -> Result>, RequestProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); let mut min_batch_number: Option = None; let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None; let result = loop { - let l1_batch_number = match self + let Some(l1_batch_number) = self .lock_batch_for_proving(request.tee_type, min_batch_number) .await? - { - Some(number) => number, - None => break Ok(Json(TeeProofGenerationDataResponse(None))), + else { + // No job available + return Ok(None); }; - match self.blob_store.get(l1_batch_number).await { - Ok(input) => break Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(input))))), - Err(ObjectStoreError::KeyNotFound(_)) => { + match self + .tee_verifier_input_for_existing_batch(l1_batch_number) + .await + { + Ok(input) => { + break Ok(Some(Json(TeeProofGenerationDataResponse(Box::new(input))))); + } + Err(RequestProcessorError::ObjectStore(ObjectStoreError::KeyNotFound(_))) => { missing_range = match missing_range { Some((start, _)) => Some((start, l1_batch_number)), None => Some((l1_batch_number, l1_batch_number)), @@ -62,7 +76,7 @@ impl TeeRequestProcessor { } Err(err) => { self.unlock_batch(l1_batch_number, request.tee_type).await?; - break Err(RequestProcessorError::ObjectStore(err)); + break Err(err); } } }; @@ -78,14 +92,73 @@ impl TeeRequestProcessor { result } + #[tracing::instrument(skip(self))] + async fn tee_verifier_input_for_existing_batch( + &self, + l1_batch_number: L1BatchNumber, + ) -> Result { + let vm_run_data: VMRunWitnessInputData = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let merkle_paths: WitnessInputMerklePaths = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let mut connection = self + .pool + .connection_tagged("tee_request_processor") + .await + .map_err(RequestProcessorError::Dal)?; + + let l2_blocks_execution_data = connection + .transactions_dal() + .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number) + .await + .map_err(RequestProcessorError::Dal)?; + + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) + .await + .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))?; + + // In the state keeper, this value is used to reject execution. + // All batches have already been executed by State Keeper. + // This means we don't want to reject any execution, therefore we're using MAX as an allow all. 
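For context on how the `Ok(None)` returned above by `get_proof_generation_data` surfaces to clients, the `/tee/proof_inputs` route registered earlier in this diff maps the three-way outcome onto HTTP statuses. A minimal sketch of that mapping, assuming `axum` and `serde`; the payload and error types here are hypothetical stand-ins, not the crate's own:

    use axum::{
        http::StatusCode,
        response::{IntoResponse, Response},
        Json,
    };
    use serde::Serialize;

    // Hypothetical stand-in for the proof-input payload returned on success.
    #[derive(Serialize)]
    struct ProofInputs {
        l1_batch_number: u32,
    }

    // Hypothetical error type; any error implementing IntoResponse behaves the same way.
    struct HandlerError(StatusCode);

    impl IntoResponse for HandlerError {
        fn into_response(self) -> Response {
            self.0.into_response()
        }
    }

    // Ok(Some(_)) -> 200 with a JSON body, Ok(None) -> 204 No Content, Err(_) -> the error's response.
    fn into_http(result: Result<Option<Json<ProofInputs>>, HandlerError>) -> Response {
        match result {
            Ok(Some(data)) => (StatusCode::OK, data).into_response(),
            Ok(None) => StatusCode::NO_CONTENT.into_response(),
            Err(err) => err.into_response(),
        }
    }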
+ let validation_computational_gas_limit = u32::MAX; + + let (system_env, l1_batch_env) = l1_batch_params_provider + .load_l1_batch_env( + &mut connection, + l1_batch_number, + validation_computational_gas_limit, + self.l2_chain_id, + ) + .await + .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))? + .ok_or(RequestProcessorError::GeneralError( + "system_env, l1_batch_env missing".into(), + ))?; + + Ok(TeeVerifierInput::new(V1TeeVerifierInput { + vm_run_data, + merkle_paths, + l2_blocks_execution_data, + l1_batch_env, + system_env, + })) + } + async fn lock_batch_for_proving( &self, tee_type: TeeType, min_batch_number: Option, ) -> Result, RequestProcessorError> { - let result = self - .pool - .connection() + self.pool + .connection_tagged("tee_request_processor") .await? .tee_proof_generation_dal() .lock_batch_for_proving( @@ -93,8 +166,8 @@ impl TeeRequestProcessor { self.config.proof_generation_timeout(), min_batch_number, ) - .await?; - Ok(result) + .await + .map_err(RequestProcessorError::Dal) } async fn unlock_batch( @@ -103,7 +176,7 @@ impl TeeRequestProcessor { tee_type: TeeType, ) -> Result<(), RequestProcessorError> { self.pool - .connection() + .connection_tagged("tee_request_processor") .await? .tee_proof_generation_dal() .unlock_batch(l1_batch_number, tee_type) @@ -117,7 +190,7 @@ impl TeeRequestProcessor { Json(proof): Json, ) -> Result, RequestProcessorError> { let l1_batch_number = L1BatchNumber(l1_batch_number); - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("tee_request_processor").await?; let mut dal = connection.tee_proof_generation_dal(); tracing::info!( @@ -143,7 +216,7 @@ impl TeeRequestProcessor { ) -> Result, RequestProcessorError> { tracing::info!("Received attestation: {:?}", payload); - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("tee_request_processor").await?; let mut dal = connection.tee_proof_generation_dal(); dal.save_attestation(&payload.pubkey, &payload.attestation) diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 8220aef5da0b..a10044cacd9c 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -1,5 +1,3 @@ -use std::time::Instant; - use axum::{ body::Body, http::{self, Method, Request, StatusCode}, @@ -8,128 +6,64 @@ use axum::{ }; use serde_json::json; use tower::ServiceExt; -use zksync_basic_types::U256; +use zksync_basic_types::L2ChainId; use zksync_config::configs::ProofDataHandlerConfig; -use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_dal::{ConnectionPool, CoreDal}; -use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use zksync_object_store::MockObjectStore; -use zksync_prover_interface::{ - api::SubmitTeeProofRequest, - inputs::{TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths}, -}; -use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber, H256}; +use zksync_prover_interface::api::SubmitTeeProofRequest; +use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber}; use crate::create_proof_processing_router; -// Test the /tee/proof_inputs endpoint by: -// 1. Mocking an object store with a single batch blob containing TEE verifier input -// 2. Populating the SQL db with relevant information about the status of the TEE verifier input and -// TEE proof generation -// 3. 
Sending a request to the /tee/proof_inputs endpoint and asserting that the response -// matches the file from the object store #[tokio::test] async fn request_tee_proof_inputs() { - // prepare a sample mocked TEE verifier input - - let batch_number = L1BatchNumber::from(1); - let tvi = V1TeeVerifierInput::new( - WitnessInputMerklePaths::new(0), - vec![], - L1BatchEnv { - previous_batch_hash: Some(H256([1; 32])), - number: batch_number, - timestamp: 0, - fee_input: Default::default(), - fee_account: Default::default(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 0, - timestamp: 0, - prev_block_hash: H256([1; 32]), - max_virtual_blocks_to_create: 0, - }, - }, - SystemEnv { - zk_porter_available: false, - version: Default::default(), - base_system_smart_contracts: BaseSystemContracts { - bootloader: SystemContractCode { - code: vec![U256([1; 4])], - hash: H256([1; 32]), - }, - default_aa: SystemContractCode { - code: vec![U256([1; 4])], - hash: H256([1; 32]), - }, - evm_emulator: None, - }, - bootloader_gas_limit: 0, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: 0, - chain_id: Default::default(), - }, - vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], - ); - let tvi = TeeVerifierInput::V1(tvi); - - // populate mocked object store with a single batch blob - - let blob_store = MockObjectStore::arc(); - let object_path = blob_store.put(batch_number, &tvi).await.unwrap(); - - // get connection to the SQL db and mock the status of the TEE proof generation - let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number, &object_path).await; - - // test the /tee/proof_inputs endpoint; it should return the batch from the object store let app = create_proof_processing_router( - blob_store, - db_conn_pool, + MockObjectStore::arc(), + db_conn_pool.clone(), ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, tee_support: true, }, L1BatchCommitmentMode::Rollup, + L2ChainId::default(), ); - let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "sgx" })).unwrap()); - let response = app - .oneshot( - Request::builder() - .method(Method::POST) - .uri("/tee/proof_inputs") - .header(http::header::CONTENT_TYPE, "application/json") - .body(req_body) - .unwrap(), - ) - .await - .unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - - let body = axum::body::to_bytes(response.into_body(), usize::MAX) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); - let deserialized: TeeVerifierInput = serde_json::from_value(json).unwrap(); - - assert_eq!(tvi, deserialized); + let test_cases = vec![ + (json!({ "tee_type": "sgx" }), StatusCode::NO_CONTENT), + ( + json!({ "tee_type": "Sgx" }), + StatusCode::UNPROCESSABLE_ENTITY, + ), + ]; + + for (body, expected_status) in test_cases { + let req_body = Body::from(serde_json::to_vec(&body).unwrap()); + let response = app + .clone() + .oneshot( + Request::builder() + .method(Method::POST) + .uri("/tee/proof_inputs") + .header(http::header::CONTENT_TYPE, "application/json") + .body(req_body) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), expected_status); + } } // Test /tee/submit_proofs endpoint using a mocked TEE proof and verify response and db state #[tokio::test] async fn submit_tee_proof() { - let blob_store = MockObjectStore::arc(); - let db_conn_pool = ConnectionPool::test_pool().await; - let object_path = "mocked_object_path"; let 
batch_number = L1BatchNumber::from(1); + let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number, object_path).await; - - // send a request to the /tee/submit_proofs endpoint, using a mocked TEE proof + mock_tee_batch_status(db_conn_pool.clone(), batch_number).await; let tee_proof_request_str = r#"{ "signature": "0001020304", @@ -141,7 +75,7 @@ async fn submit_tee_proof() { serde_json::from_str::(tee_proof_request_str).unwrap(); let uri = format!("/tee/submit_proofs/{}", batch_number.0); let app = create_proof_processing_router( - blob_store, + MockObjectStore::arc(), db_conn_pool.clone(), ProofDataHandlerConfig { http_port: 1337, @@ -149,6 +83,7 @@ async fn submit_tee_proof() { tee_support: true, }, L1BatchCommitmentMode::Rollup, + L2ChainId::default(), ); // this should fail because we haven't saved the attestation for the pubkey yet @@ -207,32 +142,15 @@ async fn submit_tee_proof() { async fn mock_tee_batch_status( db_conn_pool: ConnectionPool, batch_number: L1BatchNumber, - object_path: &str, ) { let mut proof_db_conn = db_conn_pool.connection().await.unwrap(); let mut proof_dal = proof_db_conn.tee_proof_generation_dal(); - let mut input_db_conn = db_conn_pool.connection().await.unwrap(); - let mut input_producer_dal = input_db_conn.tee_verifier_input_producer_dal(); // there should not be any batches awaiting proof in the db yet let oldest_batch_number = proof_dal.get_oldest_unpicked_batch().await.unwrap(); assert!(oldest_batch_number.is_none()); - // mock SQL table with relevant information about the status of the TEE verifier input - - input_producer_dal - .create_tee_verifier_input_producer_job(batch_number) - .await - .expect("Failed to create tee_verifier_input_producer_job"); - - // pretend that the TEE verifier input blob file was fetched successfully - - input_producer_dal - .mark_job_as_successful(batch_number, Instant::now(), object_path) - .await - .expect("Failed to mark tee_verifier_input_producer_job job as successful"); - // mock SQL table with relevant information about the status of TEE proof generation proof_dal diff --git a/core/node/shared_metrics/src/lib.rs b/core/node/shared_metrics/src/lib.rs index e0a7fa74ef42..2c41ec9293a0 100644 --- a/core/node/shared_metrics/src/lib.rs +++ b/core/node/shared_metrics/src/lib.rs @@ -29,7 +29,6 @@ pub enum InitStage { EthTxAggregator, EthTxManager, Tree, - TeeVerifierInputProducer, Consensus, DADispatcher, } @@ -45,7 +44,6 @@ impl fmt::Display for InitStage { Self::EthTxAggregator => formatter.write_str("eth_tx_aggregator"), Self::EthTxManager => formatter.write_str("eth_tx_manager"), Self::Tree => formatter.write_str("tree"), - Self::TeeVerifierInputProducer => formatter.write_str("tee_verifier_input_producer"), Self::Consensus => formatter.write_str("consensus"), Self::DADispatcher => formatter.write_str("da_dispatcher"), } diff --git a/core/node/tee_verifier_input_producer/Cargo.toml b/core/node/tee_verifier_input_producer/Cargo.toml deleted file mode 100644 index 7a5a4de5d0c9..000000000000 --- a/core/node/tee_verifier_input_producer/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "zksync_tee_verifier_input_producer" -description = "ZKsync TEE verifier input producer" -version.workspace = true -edition.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -license.workspace = true -keywords.workspace = true -categories.workspace = true - -[dependencies] -zksync_dal.workspace = true -zksync_object_store.workspace 
= true -zksync_prover_interface.workspace = true -zksync_queued_job_processor.workspace = true -zksync_tee_verifier.workspace = true -zksync_types.workspace = true -zksync_utils.workspace = true -zksync_vm_executor.workspace = true -vise.workspace = true - -anyhow.workspace = true -async-trait.workspace = true -tracing.workspace = true -tokio = { workspace = true, features = ["time"] } diff --git a/core/node/tee_verifier_input_producer/README.md b/core/node/tee_verifier_input_producer/README.md deleted file mode 100644 index 75a2029985cc..000000000000 --- a/core/node/tee_verifier_input_producer/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# `zksync_tee_verifier_input_producer` - -Component responsible for producing inputs for verification of execution in TEE. diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs deleted file mode 100644 index 8a99aa07ae51..000000000000 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ /dev/null @@ -1,261 +0,0 @@ -//! Produces input for a TEE Verifier -//! -//! Extract all data needed to re-execute and verify an L1Batch without accessing -//! the DB and/or the object store. -//! -//! For testing purposes, the L1 batch is re-executed immediately for now. -//! Eventually, this component will only extract the inputs and send them to another -//! machine over a "to be defined" channel, e.g., save them to an object store. - -use std::{sync::Arc, time::Instant}; - -use anyhow::Context; -use async_trait::async_trait; -use tokio::task::JoinHandle; -use zksync_dal::{tee_verifier_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool, Core, CoreDal}; -use zksync_object_store::ObjectStore; -use zksync_prover_interface::inputs::{ - TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths, -}; -use zksync_queued_job_processor::JobProcessor; -use zksync_tee_verifier::Verify; -use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; -use zksync_utils::u256_to_h256; -use zksync_vm_executor::storage::L1BatchParamsProvider; - -use self::metrics::METRICS; - -mod metrics; - -/// Component that extracts all data (from DB) necessary to run a TEE Verifier. -#[derive(Debug)] -pub struct TeeVerifierInputProducer { - connection_pool: ConnectionPool, - l2_chain_id: L2ChainId, - object_store: Arc, -} - -impl TeeVerifierInputProducer { - pub async fn new( - connection_pool: ConnectionPool, - object_store: Arc, - l2_chain_id: L2ChainId, - ) -> anyhow::Result { - Ok(TeeVerifierInputProducer { - connection_pool, - object_store, - l2_chain_id, - }) - } - - async fn process_job_impl( - l1_batch_number: L1BatchNumber, - started_at: Instant, - connection_pool: ConnectionPool, - object_store: Arc, - l2_chain_id: L2ChainId, - ) -> anyhow::Result { - let prepare_basic_circuits_job: WitnessInputMerklePaths = object_store - .get(l1_batch_number) - .await - .context("failed to get PrepareBasicCircuitsJob from object store")?; - - let mut connection = connection_pool - .connection() - .await - .context("failed to get connection for TeeVerifierInputProducer")?; - - let l2_blocks_execution_data = connection - .transactions_dal() - .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number) - .await?; - - let l1_batch_header = connection - .blocks_dal() - .get_l1_batch_header(l1_batch_number) - .await - .with_context(|| format!("header is missing for L1 batch #{l1_batch_number}"))? 
- .unwrap(); - - let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) - .await - .context("failed initializing L1 batch params provider")?; - - // In the state keeper, this value is used to reject execution. - // All batches have already been executed by State Keeper. - // This means we don't want to reject any execution, therefore we're using MAX as an allow all. - let validation_computational_gas_limit = u32::MAX; - - let (system_env, l1_batch_env) = l1_batch_params_provider - .load_l1_batch_env( - &mut connection, - l1_batch_number, - validation_computational_gas_limit, - l2_chain_id, - ) - .await? - .with_context(|| format!("expected L1 batch #{l1_batch_number} to be sealed"))?; - - let used_contract_hashes = l1_batch_header - .used_contract_hashes - .into_iter() - .map(u256_to_h256) - .collect(); - - // `get_factory_deps()` returns the bytecode in chunks of `Vec<[u8; 32]>`, - // but `fn store_factory_dep(&mut self, hash: H256, bytecode: Vec)` in `InMemoryStorage` wants flat byte vecs. - pub fn into_flattened(data: Vec<[T; N]>) -> Vec { - let mut new = Vec::new(); - for slice in data.iter() { - new.extend_from_slice(slice); - } - new - } - - let used_contracts = connection - .factory_deps_dal() - .get_factory_deps(&used_contract_hashes) - .await - .into_iter() - .map(|(hash, bytes)| (u256_to_h256(hash), into_flattened(bytes))) - .collect(); - - tracing::info!("Started execution of l1_batch: {l1_batch_number:?}"); - - let tee_verifier_input = V1TeeVerifierInput::new( - prepare_basic_circuits_job, - l2_blocks_execution_data, - l1_batch_env, - system_env, - used_contracts, - ); - - // TODO (SEC-263): remove these 2 lines after successful testnet runs - tee_verifier_input.clone().verify()?; - tracing::info!("Looks like we verified {l1_batch_number} correctly"); - - tracing::info!("Finished execution of l1_batch: {l1_batch_number:?}"); - - METRICS.process_batch_time.observe(started_at.elapsed()); - tracing::debug!( - "TeeVerifierInputProducer took {:?} for L1BatchNumber {}", - started_at.elapsed(), - l1_batch_number.0 - ); - - Ok(TeeVerifierInput::new(tee_verifier_input)) - } -} - -#[async_trait] -impl JobProcessor for TeeVerifierInputProducer { - type Job = L1BatchNumber; - type JobId = L1BatchNumber; - type JobArtifacts = TeeVerifierInput; - const SERVICE_NAME: &'static str = "tee_verifier_input_producer"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut connection = self.connection_pool.connection().await?; - let l1_batch_to_process = connection - .tee_verifier_input_producer_dal() - .get_next_tee_verifier_input_producer_job() - .await - .context("failed to get next basic witness input producer job")?; - Ok(l1_batch_to_process.map(|number| (number, number))) - } - - async fn save_failure(&self, job_id: Self::JobId, started_at: Instant, error: String) { - let attempts = self - .connection_pool - .connection() - .await - .unwrap() - .tee_verifier_input_producer_dal() - .mark_job_as_failed(job_id, started_at, error) - .await - .expect("errored whilst marking job as failed"); - if let Some(tries) = attempts { - tracing::warn!("Failed to process job: {job_id:?}, after {tries} tries."); - } else { - tracing::warn!("L1 Batch {job_id:?} was processed successfully by another worker."); - } - } - - async fn process_job( - &self, - _job_id: &Self::JobId, - job: Self::Job, - started_at: Instant, - ) -> JoinHandle> { - let l2_chain_id = self.l2_chain_id; - let connection_pool = self.connection_pool.clone(); - let object_store = self.object_store.clone(); - 
tokio::task::spawn(async move { - Self::process_job_impl( - job, - started_at, - connection_pool.clone(), - object_store, - l2_chain_id, - ) - .await - }) - } - - async fn save_result( - &self, - job_id: Self::JobId, - started_at: Instant, - artifacts: Self::JobArtifacts, - ) -> anyhow::Result<()> { - let observer: vise::LatencyObserver = METRICS.upload_input_time.start(); - let object_path = self - .object_store - .put(job_id, &artifacts) - .await - .context("failed to upload artifacts for TeeVerifierInputProducer")?; - observer.observe(); - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to acquire DB connection for TeeVerifierInputProducer")?; - let mut transaction = connection - .start_transaction() - .await - .context("failed to acquire DB transaction for TeeVerifierInputProducer")?; - transaction - .tee_verifier_input_producer_dal() - .mark_job_as_successful(job_id, started_at, &object_path) - .await - .context("failed to mark job as successful for TeeVerifierInputProducer")?; - transaction - .tee_proof_generation_dal() - .insert_tee_proof_generation_job(job_id, TeeType::Sgx) - .await?; - transaction - .commit() - .await - .context("failed to commit DB transaction for TeeVerifierInputProducer")?; - METRICS.block_number_processed.set(job_id.0 as u64); - Ok(()) - } - - fn max_attempts(&self) -> u32 { - JOB_MAX_ATTEMPT as u32 - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to acquire DB connection for TeeVerifierInputProducer")?; - connection - .tee_verifier_input_producer_dal() - .get_tee_verifier_input_producer_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for TeeVerifierInputProducer") - } -} diff --git a/core/node/tee_verifier_input_producer/src/metrics.rs b/core/node/tee_verifier_input_producer/src/metrics.rs deleted file mode 100644 index 362804d338e9..000000000000 --- a/core/node/tee_verifier_input_producer/src/metrics.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! 
Metrics - -use std::time::Duration; - -use vise::{Buckets, Gauge, Histogram, Metrics, Unit}; - -#[derive(Debug, Metrics)] -#[metrics(prefix = "tee_verifier_input_producer")] -pub(crate) struct TeeVerifierInputProducerMetrics { - #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] - pub process_batch_time: Histogram, - #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] - pub upload_input_time: Histogram, - pub block_number_processed: Gauge, -} - -#[vise::register] -pub(super) static METRICS: vise::Global = vise::Global::new(); diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 80938e4ef835..e9d83903d117 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -68,7 +68,7 @@ RUN for VERSION in $(seq -f "v1.4.%g" 0 1); do \ done # install zkvyper 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 5); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 6); do \ mkdir -p /etc/zkvyper-bin/$VERSION && \ wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-$VERSION -O /etc/zkvyper-bin/$VERSION/zkvyper && \ chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index d8bef020c642..18107f0d4f93 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -1,6 +1,6 @@ # Environment configuration for the Rust code # We don't provide the group name like `[rust]` here, because we don't want -# these variables to be prefixed during the compiling. +# these variables to be prefixed during the compiling. # `RUST_LOG` environment variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. @@ -26,7 +26,6 @@ zksync_node_sync=info,\ zksync_node_consensus=info,\ zksync_contract_verification_server=info,\ zksync_node_api_server=info,\ -zksync_tee_verifier_input_producer=info,\ zksync_consensus_bft=info,\ zksync_consensus_network=info,\ zksync_consensus_storage=info,\ diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index 017d79dbe736..587ba4614a59 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -312,7 +312,7 @@ prometheus: observability: log_format: plain - log_directives: 
"zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" + log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" # Uncomment only if needed # sentry: # url: unset diff --git a/infrastructure/protocol-upgrade/README.md b/infrastructure/protocol-upgrade/README.md index da5ee313dab8..c7998b961233 100644 --- a/infrastructure/protocol-upgrade/README.md +++ b/infrastructure/protocol-upgrade/README.md @@ -25,13 +25,15 @@ If not provided as arguments, the tool can retrieve certain values from environm 2. `l2rpc` - `API_WEB3_JSON_RPC_HTTP_URL` 3. `create2-address` - `CONTRACTS_CREATE2_FACTORY_ADDR` 4. `zksync-address` - `CONTRACTS_DIAMOND_PROXY_ADDR` -5. `nonce` - Taken from the node via `l1rpc` -6. `gas-price` - Taken from the node via `l1rpc` -7. 
`environment` - By default, set to `localhost`. Always specify it explicitly. Possible values: `localhost`, +5. `upgrade-address` - `CONTRACTS_DEFAULT_UPGRADE_ADDR` +6. `l2-upgrader-address` - `CONTRACTS_L2_DEFAULT_UPGRADE_ADDR` +7. `nonce` - Taken from the node via `l1rpc` +8. `gas-price` - Taken from the node via `l1rpc` +9. `environment` - By default, set to `localhost`. Always specify it explicitly. Possible values: `localhost`, `testnet2`, `stage2`, `mainnet2`. Each upgrade on different environments is performed separately since the contract addresses differ between environments. -8. `private-key` - If not specified, the default key from the default mnemonic will be used. Always specify it - explicitly. +10. `private-key` - If not specified, the default key from the default mnemonic will be used. Always specify it + explicitly. ### Create a Protocol Upgrade Proposal @@ -215,8 +217,7 @@ $ zk f yarn start transactions build-default \ --l2-upgrader-address \ --diamond-upgrade-proposal-id \ --l1rpc \ ---zksync-address \ ---use-new-governance +--zksync-address ``` To execute the `proposeTransparentUpgrade` transaction on L1, use the following command: @@ -228,7 +229,6 @@ $ zk f yarn start transactions propose-upgrade \ --gas-price \ --nonce \ --zksync-address \ ---new-governance \ --environment ``` @@ -241,7 +241,6 @@ $ zk f yarn start transactions execute-upgrade \ --gas-price \ --nonce \ --zksync-address \ ---new-governance \ --environment ``` @@ -254,6 +253,5 @@ $ zk f yarn start transactions cancel-upgrade \ --zksync-address \ --gas-price \ --nonce \ ---new-governance \ --environment ``` diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index dfea3a3bfc35..bd7df8ab456b 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -3,12 +3,10 @@ import { ForceDeployUpgraderFactory as ForceDeployUpgraderFactoryL2 } from 'l2-c import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1, AdminFacetFactory, - GovernanceFactory, StateTransitionManagerFactory, ChainAdminFactory } from 'l1-contracts/typechain'; import { FacetCut } from 'l1-contracts/src.ts/diamondCut'; -import { IZkSyncFactory } from '../pre-boojum/IZkSyncFactory'; import { ComplexUpgraderFactory } from 'system-contracts/typechain'; import { getCommonDataFileName, @@ -29,12 +27,26 @@ import * as path from 'path'; const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); +export enum Action { + Add = 0, + Replace = 1, + Remove = 2 +} + export interface DiamondCutData { facetCuts: FacetCut[]; initAddress: string; initCalldata: string; } +export interface ChainCreationParams { + genesisUpgrade: string; + genesisBatchHash: string; + genesisIndexRepeatedStorageChanges: number; + genesisBatchCommitment: string; + diamondCut: DiamondCutData; +} + export interface ForceDeployment { // The bytecode hash to put on an address bytecodeHash: BytesLike; @@ -89,7 +101,6 @@ export interface ProposedUpgrade { postUpgradeCalldata: BytesLike; upgradeTimestamp: ethers.BigNumber; newProtocolVersion: BigNumberish; - newAllowList: string; } function buildNoopL2UpgradeTx(): L2CanonicalTransaction { @@ -123,10 +134,8 @@ export function buildProposeUpgrade( bootloaderHash?: BytesLike, defaultAccountHash?: BytesLike, verifier?: string, - newAllowList?: string, l2ProtocolUpgradeTx?: 
L2CanonicalTransaction ): ProposedUpgrade { - newAllowList = newAllowList ?? ethers.constants.AddressZero; bootloaderHash = bootloaderHash ?? ethers.constants.HashZero; defaultAccountHash = defaultAccountHash ?? ethers.constants.HashZero; l1ContractsUpgradeCalldata = l1ContractsUpgradeCalldata ?? '0x'; @@ -142,8 +151,7 @@ export function buildProposeUpgrade( postUpgradeCalldata, upgradeTimestamp, factoryDeps: [], - newProtocolVersion, - newAllowList + newProtocolVersion }; } @@ -171,43 +179,6 @@ export function prepareDefaultCalldataForL2upgrade(forcedDeployments: ForceDeplo return complexUpgraderCalldata; } -interface GovernanceTx { - scheduleCalldata: string; - executeCalldata: string; - operation: any; -} - -function prepareGovernanceTxs(target: string, data: BytesLike): GovernanceTx { - const govCall = { - target: target, - value: 0, - data: data - }; - - const operation = { - calls: [govCall], - predecessor: ethers.constants.HashZero, - salt: ethers.constants.HashZero - }; - - const governance = new GovernanceFactory(); - - // Get transaction data of the `scheduleTransparent` - const scheduleCalldata = governance.interface.encodeFunctionData('scheduleTransparent', [ - operation, - 0 // delay - ]); - - // Get transaction data of the `execute` - const executeCalldata = governance.interface.encodeFunctionData('execute', [operation]); - - return { - scheduleCalldata, - executeCalldata, - operation - }; -} - function prepareChainAdminCalldata(target: string, data: BytesLike): string { const call = { target: target, @@ -221,15 +192,18 @@ function prepareChainAdminCalldata(target: string, data: BytesLike): string { return calldata; } -export function prepareTransparentUpgradeCalldataForNewGovernance( +export function prepareUpgradeCalldata( oldProtocolVersion, oldProtocolVersionDeadline, newProtocolVersion, initCalldata, upgradeAddress: string, facetCuts: FacetCut[], - stmAddress: string, zksyncAddress: string, + genesisUpgradeAddress: string, + genesisBatchHash: string, + genesisIndexRepeatedStorageChanges: number, + genesisBatchCommitment: string, prepareDirectOperation?: boolean, chainId?: string ) { @@ -238,6 +212,21 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( initAddress: upgradeAddress, initCalldata }; + + let chainCreationDiamondCut: DiamondCutData = { + facetCuts: facetCuts.filter((cut) => cut.action == Action.Add), + initAddress: genesisUpgradeAddress, + initCalldata: '0x' + }; + + let chainCreationParams: ChainCreationParams = { + genesisUpgrade: genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, + diamondCut: chainCreationDiamondCut + }; + // Prepare calldata for STM let stm = new StateTransitionManagerFactory(); const stmUpgradeCalldata = stm.interface.encodeFunctionData('setNewVersionUpgrade', [ @@ -247,8 +236,9 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( newProtocolVersion ]); - const { scheduleCalldata: stmScheduleTransparentOperation, executeCalldata: stmExecuteOperation } = - prepareGovernanceTxs(stmAddress, stmUpgradeCalldata); + const stmSetChainCreationCalldata = stm.interface.encodeFunctionData('setChainCreationParams', [ + chainCreationParams + ]); // Prepare calldata for upgrading diamond proxy let adminFacet = new AdminFacetFactory(); @@ -257,30 +247,13 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( diamondCut ]); - const { - scheduleCalldata: scheduleTransparentOperation, - executeCalldata: executeOperation, - operation: 
governanceOperation - } = prepareGovernanceTxs(zksyncAddress, diamondProxyUpgradeCalldata); - - const newExecuteChainUpgradeCalldata = prepareChainAdminCalldata(zksyncAddress, diamondProxyUpgradeCalldata); - - const legacyScheduleTransparentOperation = adminFacet.interface.encodeFunctionData('executeUpgrade', [diamondCut]); - const { scheduleCalldata: legacyScheduleOperation, executeCalldata: legacyExecuteOperation } = prepareGovernanceTxs( - zksyncAddress, - legacyScheduleTransparentOperation - ); + const chainAdminUpgradeCalldata = prepareChainAdminCalldata(zksyncAddress, diamondProxyUpgradeCalldata); let result: any = { - stmScheduleTransparentOperation, - stmExecuteOperation, - scheduleTransparentOperation, - executeOperation, - newExecuteChainUpgradeCalldata, + stmUpgradeCalldata, + chainAdminUpgradeCalldata, diamondCut, - governanceOperation, - legacyScheduleOperation, - legacyExecuteOperation + stmSetChainCreationCalldata }; if (prepareDirectOperation) { @@ -290,13 +263,9 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( const stmDirecUpgradeCalldata = stm.interface.encodeFunctionData('executeUpgrade', [chainId, diamondCut]); - const { scheduleCalldata: stmScheduleOperationDirect, executeCalldata: stmExecuteOperationDirect } = - prepareGovernanceTxs(stmAddress, stmDirecUpgradeCalldata); - result = { ...result, - stmScheduleOperationDirect, - stmExecuteOperationDirect + stmDirecUpgradeCalldata }; } @@ -305,16 +274,16 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( export function buildDefaultUpgradeTx( environment, - diamondUpgradeProposalId, upgradeAddress, - l2UpgraderAddress, oldProtocolVersion, oldProtocolVersionDeadline, upgradeTimestamp, - newAllowList, - stmAddress, zksyncAddress, postUpgradeCalldataFlag, + genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, prepareDirectOperation?, chainId? ) { @@ -389,21 +358,23 @@ export function buildDefaultUpgradeTx( bootloaderHash, defaultAAHash, cryptoVerifierAddress, - newAllowList, l2UpgradeTx ); let l1upgradeCalldata = prepareDefaultCalldataForL1upgrade(proposeUpgradeTx); - let upgradeData = prepareTransparentUpgradeCalldataForNewGovernance( + let upgradeData = prepareUpgradeCalldata( oldProtocolVersion, oldProtocolVersionDeadline, packedNewProtocolVersion, l1upgradeCalldata, upgradeAddress, facetCuts, - stmAddress, zksyncAddress, + genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, prepareDirectOperation, chainId ); @@ -414,7 +385,6 @@ export function buildDefaultUpgradeTx( upgradeAddress, protocolVersionSemVer: newProtocolVersionSemVer, packedProtocolVersion: packedNewProtocolVersion, - diamondUpgradeProposalId, upgradeTimestamp, ...upgradeData }; @@ -423,31 +393,6 @@ export function buildDefaultUpgradeTx( console.log('Default upgrade transactions are generated'); } -async function sendTransaction( - calldata: BytesLike, - privateKey: string, - l1rpc: string, - to: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number -) { - const wallet = getWallet(l1rpc, privateKey); - gasPrice = gasPrice ?? (await wallet.provider.getGasPrice()); - nonce = nonce ?? 
(await wallet.getTransactionCount()); - const tx = await wallet.sendTransaction({ - to, - data: calldata, - value: 0, - gasLimit: 10_000_000, - gasPrice, - nonce - }); - console.log('Transaction hash: ', tx.hash); - await tx.wait(); - console.log('Transaction is executed'); -} - export function getWallet(l1rpc, privateKey) { if (!l1rpc) { l1rpc = web3Url(); @@ -462,99 +407,6 @@ export function getWallet(l1rpc, privateKey) { ).connect(provider); } -async function sendPreparedTx( - privateKey: string, - l1rpc: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number, - governanceAddr: string, - transactionsJsonField: string, - logText: string -) { - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - const calldata = transactions[transactionsJsonField]; - - console.log(`${logText} for protocolVersion ${transactions.protocolVersion}`); - await sendTransaction(calldata, privateKey, l1rpc, governanceAddr, environment, gasPrice, nonce); -} - -async function cancelUpgrade( - privateKey: string, - l1rpc: string, - zksyncAddress: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number, - execute: boolean, - newGovernanceAddress: string -) { - if (newGovernanceAddress != null) { - let wallet = getWallet(l1rpc, privateKey); - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - - let governance = GovernanceFactory.connect(newGovernanceAddress, wallet); - const operation = transactions.governanceOperation; - - const operationId = await governance.hashOperation(operation); - - console.log(`Cancel upgrade operation with id: ${operationId}`); - if (execute) { - const tx = await governance.cancel(operationId); - await tx.wait(); - console.log('Operation canceled'); - } else { - const calldata = governance.interface.encodeFunctionData('cancel', [operationId]); - console.log(`Cancel upgrade calldata: ${calldata}`); - } - } else { - zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; - let wallet = getWallet(l1rpc, privateKey); - let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - - const transparentUpgrade = transactions.transparentUpgrade; - const diamondUpgradeProposalId = transactions.diamondUpgradeProposalId; - - const proposalHash = await zkSync.upgradeProposalHash( - transparentUpgrade, - diamondUpgradeProposalId, - ethers.constants.HashZero - ); - - console.log(`Cancel upgrade with hash: ${proposalHash}`); - let cancelUpgradeCalldata = zkSync.interface.encodeFunctionData('cancelUpgradeProposal', [proposalHash]); - if (execute) { - await sendTransaction( - cancelUpgradeCalldata, - privateKey, - l1rpc, - zksyncAddress, - environment, - gasPrice, - nonce - ); - } else { - console.log(`Cancel upgrade calldata: ${cancelUpgradeCalldata}`); - } - } -} - -async function getNewDiamondUpgradeProposalId(l1rpc: string, zksyncAddress: string) { - zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; - // We don't care about the wallet here, we just need to make a get call. 
- let wallet = getWallet(l1rpc, undefined); - let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); - let proposalId = await zkSync.getCurrentProposalId(); - proposalId = proposalId.add(1); - console.log( - `New proposal id: ${proposalId} for ${zksyncAddress} network: ${JSON.stringify( - await wallet.provider.getNetwork() - )}` - ); - return proposalId; -} - export const command = new Command('transactions').description( 'prepare the transactions and their calldata for the upgrade' ); @@ -564,223 +416,31 @@ command .requiredOption('--upgrade-timestamp ') .option('--upgrade-address ') .option('--environment ') - .option('--new-allow-list ') - .option('--l2-upgrader-address ') - .option('--diamond-upgrade-proposal-id ') .option('--old-protocol-version ') .option('--old-protocol-version-deadline ') .option('--l1rpc ') .option('--zksync-address ') - .option('--state-transition-manager-address ') .option('--chain-id ') .option('--prepare-direct-operation ') - .option('--use-new-governance') - .option('--post-upgrade-calldata') + .option('--post-upgrade-calldata ') + .option('--genesis-upgrade-address ') + .option('--genesis-batch-hash ') + .option('--genesis-index-repeated-storage-changes ') + .option('--genesis-batch-commitment ') .action(async (options) => { - if (!options.useNewGovernance) { - // TODO(X): remove old governance functionality from the protocol upgrade tool - throw new Error('Old governance is not supported anymore'); - } - - let diamondUpgradeProposalId = options.diamondUpgradeProposalId; - if (!diamondUpgradeProposalId && !options.useNewGovernance) { - diamondUpgradeProposalId = await getNewDiamondUpgradeProposalId(options.l1rpc, options.zksyncAddress); - } - buildDefaultUpgradeTx( options.environment, - diamondUpgradeProposalId, options.upgradeAddress, - options.l2UpgraderAddress, options.oldProtocolVersion, options.oldProtocolVersionDeadline, options.upgradeTimestamp, - options.newAllowList, - options.stateTransitionManagerAddress, options.zksyncAddress, options.postUpgradeCalldata, + options.genesisUpgradeAddress, + options.genesisBatchHash, + options.genesisIndexRepeatedStorageChanges, + options.genesisBatchCommitment, options.prepareDirectOperation, options.chainId ); }); - -command - .command('propose-upgrade-stm') - .option('--environment ') - .option('--private-key ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmScheduleTransparentOperation', - 'Proposing upgrade for STM' - ); - }); - -command - .command('execute-upgrade-stm') - .option('--environment ') - .option('--private-key ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmExecuteOperation', - 'Executing upgrade for STM' - ); - }); - -command - .command('propose-upgrade') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - 
.option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'scheduleTransparentOperation', - 'Proposing "upgradeChainFromVersion" upgrade' - ); - }); - -command - .command('execute-upgrade') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'executeOperation', - 'Executing "upgradeChainFromVersion" upgrade' - ); - }); - -command - .command('propose-upgrade-direct') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmScheduleOperationDirect', - 'Executing direct upgrade via STM' - ); - }); - -command - .command('execute-upgrade-direct') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmExecuteOperationDirect', - 'Executing direct upgrade via STM' - ); - }); - -command - .command('cancel-upgrade') - .option('--environment ') - .option('--private-key ') - .option('--zksync-address ') - .option('--gas-price ') - .option('--nonce ') - .option('--l1rpc ') - .option('--execute') - .option('--governance-addr ') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await cancelUpgrade( - options.privateKey, - options.l1rpc, - options.zksyncAddress, - options.environment, - options.gasPrice, - options.nonce, - options.execute, - options.newGovernance - ); - }); diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 063777a671b5..dc716a0b257b 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -16,7 +16,8 @@ const IMAGES = [ 'prover-job-monitor', 'proof-fri-gpu-compressor', 'snapshots-creator', - 'verified-sources-fetcher' + 'verified-sources-fetcher', + 'prover-autoscaler' ]; const DOCKER_REGISTRIES = ['us-docker.pkg.dev/matterlabs-infra/matterlabs-docker', 'matterlabs']; @@ -76,7 +77,8 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin 'contract-verifier', 'prover-fri-gateway', 'prover-job-monitor', - 'snapshots-creator' + 'snapshots-creator', + 'prover-autoscaler' ].includes(image) ? 
['latest', 'latest2.0', `2.0-${imageTagSha}`, `${imageTagSha}`, `2.0-${imageTagShaTS}`, `${imageTagShaTS}`] : [`latest2.0`, 'latest']; diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs index b074e0774c97..c25b624b5d43 100644 --- a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs +++ b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs @@ -36,19 +36,11 @@ pub struct Namespace { pub pods: HashMap, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct Cluster { pub name: String, pub namespaces: HashMap, } -impl Default for Cluster { - fn default() -> Self { - Self { - name: "".to_string(), - namespaces: HashMap::new(), - } - } -} #[derive(Debug, Default, Clone, Serialize, Deserialize)] pub struct Clusters { diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs index 9f37c4d11675..dd3f3cf1ad3a 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -56,6 +56,7 @@ pub struct Scaler { /// Which cluster to use first. cluster_priorities: HashMap, + max_provers: HashMap>, prover_speed: HashMap, long_pending_duration: chrono::Duration, } @@ -87,6 +88,7 @@ impl Scaler { watcher, queuer, cluster_priorities: config.cluster_priorities, + max_provers: config.max_provers, prover_speed: config.prover_speed, long_pending_duration: chrono::Duration::seconds( config.long_pending_duration.whole_seconds(), @@ -112,7 +114,12 @@ impl Scaler { let e = gp_map.entry(gpu).or_insert(GPUPool { name: cluster.name.clone(), gpu, - max_pool_size: 100, // TODO: get from the agent. + max_pool_size: self + .max_provers + .get(&cluster.name) + .and_then(|inner_map| inner_map.get(&gpu)) + .copied() + .unwrap_or(0), ..Default::default() }); @@ -265,23 +272,49 @@ impl Scaler { } } - tracing::debug!("run result: provers {:?}, total: {}", &provers, total); + tracing::debug!( + "run result for namespace {}: provers {:?}, total: {}", + namespace, + &provers, + total + ); provers } } +/// is_namespace_running returns true if there are some pods running in it. +fn is_namespace_running(namespace: &str, clusters: &Clusters) -> bool { + clusters + .clusters + .values() + .flat_map(|v| v.namespaces.iter()) + .filter_map(|(k, v)| if k == namespace { Some(v) } else { None }) + .flat_map(|v| v.deployments.values()) + .map( + |d| d.running + d.desired, // If there is something running or expected to run, we + // should consider the namespace. + ) + .sum::() + > 0 +} + #[async_trait::async_trait] impl Task for Scaler { async fn invoke(&self) -> anyhow::Result<()> { let queue = self.queuer.get_queue().await.unwrap(); - // TODO: Check that clusters data is ready. 
- let clusters = self.watcher.clusters.lock().await; + let guard = self.watcher.data.lock().await; + if let Err(err) = watcher::check_is_ready(&guard.is_ready) { + tracing::warn!("Skipping Scaler run: {}", err); + return Ok(()); + } + for (ns, ppv) in &self.namespaces { let q = queue.queue.get(ppv).cloned().unwrap_or(0); - if q > 0 { - let provers = self.run(ns, q, &clusters); + tracing::debug!("Running eval for namespace {ns} and PPV {ppv} found queue {q}"); + if q > 0 || is_namespace_running(ns, &guard.clusters) { + let provers = self.run(ns, q, &guard.clusters); for (k, num) in &provers { AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] .set(*num as u64); @@ -302,7 +335,7 @@ mod tests { use super::*; use crate::{ - cluster_types::{self, Deployment, Namespace, Pod}, + cluster_types::{Deployment, Namespace, Pod}, global::{queuer, watcher}, }; @@ -310,14 +343,19 @@ mod tests { fn test_run() { let watcher = watcher::Watcher { cluster_agents: vec![], - clusters: Arc::new(Mutex::new(cluster_types::Clusters { - ..Default::default() - })), + data: Arc::new(Mutex::new(watcher::WatchedData::default())), }; let queuer = queuer::Queuer { prover_job_monitor_url: "".to_string(), }; - let scaler = Scaler::new(watcher, queuer, ProverAutoscalerScalerConfig::default()); + let scaler = Scaler::new( + watcher, + queuer, + ProverAutoscalerScalerConfig { + max_provers: HashMap::from([("foo".to_string(), HashMap::from([(Gpu::L4, 100)]))]), + ..Default::default() + }, + ); let got = scaler.run( &"prover".to_string(), 1499, @@ -355,6 +393,6 @@ mod tests { }, 3, )]); - assert!(got == want); + assert_eq!(got, want); } } diff --git a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs index ef3ebd3b8193..01fa68c60f84 100644 --- a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs +++ b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs @@ -1,6 +1,6 @@ use std::{collections::HashMap, sync::Arc}; -use anyhow::{Context, Ok}; +use anyhow::{anyhow, Context, Ok, Result}; use futures::future; use reqwest::Method; use tokio::sync::Mutex; @@ -12,15 +12,31 @@ use crate::{ task_wiring::Task, }; +#[derive(Default)] +pub struct WatchedData { + pub clusters: Clusters, + pub is_ready: Vec, +} + +pub fn check_is_ready(v: &Vec) -> Result<()> { + for b in v { + if !b { + return Err(anyhow!("Clusters data is not ready")); + } + } + Ok(()) +} + #[derive(Clone)] pub struct Watcher { /// List of base URLs of all agents. pub cluster_agents: Vec>, - pub clusters: Arc>, + pub data: Arc>, } impl Watcher { pub fn new(agent_urls: Vec) -> Self { + let size = agent_urls.len(); Self { cluster_agents: agent_urls .into_iter() @@ -31,8 +47,11 @@ impl Watcher { ) }) .collect(), - clusters: Arc::new(Mutex::new(Clusters { - clusters: HashMap::new(), + data: Arc::new(Mutex::new(WatchedData { + clusters: Clusters { + clusters: HashMap::new(), + }, + is_ready: vec![false; size], })), } } @@ -45,7 +64,8 @@ impl Task for Watcher { .cluster_agents .clone() .into_iter() - .map(|a| { + .enumerate() + .map(|(i, a)| { tracing::debug!("Getting cluster data from agent {}.", a); tokio::spawn(async move { let url: String = a @@ -55,13 +75,14 @@ impl Task for Watcher { .to_string(); let response = send_request_with_retries(&url, 5, Method::GET, None, None).await; - response + let res = response .map_err(|err| { anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}") })? 
.json::() .await - .context("Failed to read response as json") + .context("Failed to read response as json"); + Ok((i, res)) }) }) .collect(); @@ -71,18 +92,16 @@ impl Task for Watcher { .await .into_iter() .map(|h| async move { - let c = h.unwrap().unwrap(); - self.clusters - .lock() - .await - .clusters - .insert(c.name.clone(), c); + let (i, res) = h??; + let c = res?; + let mut guard = self.data.lock().await; + guard.clusters.clusters.insert(c.name.clone(), c); + guard.is_ready[i] = true; Ok(()) }) .collect::>(), ) - .await - .unwrap(); + .await?; Ok(()) } diff --git a/prover/crates/bin/prover_autoscaler/src/main.rs b/prover/crates/bin/prover_autoscaler/src/main.rs index 196bd6deb81e..e3aec1fbd392 100644 --- a/prover/crates/bin/prover_autoscaler/src/main.rs +++ b/prover/crates/bin/prover_autoscaler/src/main.rs @@ -80,24 +80,21 @@ async fn main() -> anyhow::Result<()> { let _ = rustls::crypto::ring::default_provider().install_default(); let client = kube::Client::try_default().await?; - tracing::info!("Starting ProverAutoscaler"); - let mut tasks = vec![]; match opt.job { AutoscalerType::Agent => { + let cluster = opt + .cluster_name + .context("cluster_name is required for Agent")?; + tracing::info!("Starting ProverAutoscaler Agent for cluster {}", cluster); let agent_config = general_config.agent_config.context("agent_config")?; let exporter_config = PrometheusExporterConfig::pull(agent_config.prometheus_port); tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone()))); // TODO: maybe get cluster name from curl -H "Metadata-Flavor: Google" // http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name - let watcher = Watcher::new( - client.clone(), - opt.cluster_name - .context("cluster_name is required for Agent")?, - agent_config.namespaces, - ); + let watcher = Watcher::new(client.clone(), cluster, agent_config.namespaces); let scaler = Scaler { client }; tasks.push(tokio::spawn(watcher.clone().run())); tasks.push(tokio::spawn(agent::run_server( @@ -108,6 +105,7 @@ async fn main() -> anyhow::Result<()> { ))) } AutoscalerType::Scaler => { + tracing::info!("Starting ProverAutoscaler Scaler"); let scaler_config = general_config.scaler_config.context("scaler_config")?; let interval = scaler_config.scaler_run_interval.unsigned_abs(); let exporter_config = PrometheusExporterConfig::pull(scaler_config.prometheus_port); diff --git a/prover/docs/03_launch.md b/prover/docs/03_launch.md index 203fb6e8cecf..0465d888f612 100644 --- a/prover/docs/03_launch.md +++ b/prover/docs/03_launch.md @@ -47,7 +47,7 @@ We will be running a bunch of binaries, it's recommended to run each in a separa ### Server ``` -zk server --components=api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip +zk server --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip ``` ### Proof data handler diff --git a/zkstack_cli/crates/zkstack/README.md b/zkstack_cli/crates/zkstack/README.md index 6e529efc2009..f352d96fec42 100644 --- a/zkstack_cli/crates/zkstack/README.md +++ b/zkstack_cli/crates/zkstack/README.md @@ -508,7 +508,11 @@ Initialize prover - `--public-location ` - `--public-project-id ` - `--bellman-cuda-dir ` -- `--download-key ` +- `--bellman-cuda` + + Possible values: `true`, `false` + +- `--setup-compressor-key ` Possible values: `true`, `false` @@ -564,6 +568,10 @@ Run prover 
Possible values: `true`, `false` +- `--tag` - Tag of the docker image to run. + + The default value is `latest2.0`, but you can specify your preferred one. + - `--round ` Possible values: `all-rounds`, `basic-circuits`, `leaf-aggregation`, `node-aggregation`, `recursion-tip`, `scheduler` diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs index 280b5b2e91d8..fab798993025 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs @@ -61,7 +61,7 @@ pub struct ProverInitArgs { pub bellman_cuda: Option, #[clap(long, default_missing_value = "true", num_args = 0..=1)] - pub setup_compressor_keys: Option, + pub setup_compressor_key: Option, #[clap(flatten)] pub compressor_keys_args: CompressorKeysArgs, @@ -363,7 +363,7 @@ impl ProverInitArgs { }); } - let download_key = self.clone().setup_compressor_keys.unwrap_or_else(|| { + let download_key = self.clone().setup_compressor_key.unwrap_or_else(|| { PromptConfirm::new(MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT) .default(false) .ask() diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs index d7600ba2d31f..b79af777673c 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs @@ -35,6 +35,8 @@ pub struct ProverRunArgs { pub circuit_prover_args: CircuitProverArgs, #[clap(long)] pub docker: Option, + #[clap(long)] + pub tag: Option, } #[derive( @@ -300,6 +302,8 @@ impl ProverRunArgs { .ask() }); + let tag = self.tag.unwrap_or("latest2.0".to_string()); + Ok(ProverRunArgs { component: Some(component), witness_generator_args, @@ -307,6 +311,7 @@ impl ProverRunArgs { fri_prover_args: self.fri_prover_args, circuit_prover_args, docker: Some(docker), + tag: Some(tag), }) } } diff --git a/zkstack_cli/crates/zkstack/src/commands/prover/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs index 863816b9ae69..85495d124041 100644 --- a/zkstack_cli/crates/zkstack/src/commands/prover/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs @@ -33,7 +33,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() let application_args = component.get_application_args(in_docker)?; let additional_args = - component.get_additional_args(in_docker, args, &chain, &path_to_ecosystem)?; + component.get_additional_args(in_docker, args.clone(), &chain, &path_to_ecosystem)?; let (message, error) = match component { ProverComponent::WitnessGenerator => ( @@ -83,6 +83,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() run_dockerized_component( shell, component.image_name(), + &args.tag.unwrap(), &application_args, &additional_args, message, @@ -110,6 +111,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() fn run_dockerized_component( shell: &Shell, image_name: &str, + tag: &str, application_args: &[String], args: &[String], message: &'static str, @@ -124,7 +126,7 @@ fn run_dockerized_component( let mut cmd = Cmd::new(cmd!( shell, - "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name} {args...}" + "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs
{application_args...} {image_name}:{tag} {args...}" )); cmd = cmd.with_force_run(); diff --git a/zkstack_cli/crates/zkstack/src/consts.rs b/zkstack_cli/crates/zkstack/src/consts.rs index ba00af77b5a6..b7c4d2a20709 100644 --- a/zkstack_cli/crates/zkstack/src/consts.rs +++ b/zkstack_cli/crates/zkstack/src/consts.rs @@ -17,14 +17,13 @@ pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; pub const PORTAL_DOCKER_CONFIG_PATH: &str = "/usr/src/app/dist/config.js"; pub const PORTAL_DOCKER_IMAGE: &str = "matterlabs/dapp-portal"; -pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway:latest2.0"; -pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator:latest2.0"; -pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = - "matterlabs/witness-vector-generator:latest2.0"; -pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri:latest2.0"; -pub const CIRCUIT_PROVER_DOCKER_IMAGE: &str = "matterlabs/circuit-prover-gpu:latest2.0"; -pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor:latest2.0"; -pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor:latest2.0"; +pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway"; +pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator"; +pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-vector-generator"; +pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri"; +pub const CIRCUIT_PROVER_DOCKER_IMAGE: &str = "matterlabs/circuit-prover-gpu"; +pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor"; +pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor"; pub const PROVER_GATEWAY_BINARY_NAME: &str = "zksync_prover_fri_gateway"; pub const WITNESS_GENERATOR_BINARY_NAME: &str = "zksync_witness_generator"; diff --git a/zkstack_cli/zkstackup/install b/zkstack_cli/zkstackup/install index f20ba4dd545a..849f0699bc32 100755 --- a/zkstack_cli/zkstackup/install +++ b/zkstack_cli/zkstackup/install @@ -3,8 +3,7 @@ set -eo pipefail BIN_URL="https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/zkstackup" -HOME_DIR=${XDG_CONFIG_HOME:-$HOME} -BIN_DIR="$HOME_DIR/.local/bin" +BIN_DIR="$HOME/.local/bin" BIN_PATH="$BIN_DIR/zkstackup" main() { diff --git a/zkstack_cli/zkstackup/zkstackup b/zkstack_cli/zkstackup/zkstackup index 20a061620f9a..e91bbc17905c 100755 --- a/zkstack_cli/zkstackup/zkstackup +++ b/zkstack_cli/zkstackup/zkstackup @@ -1,8 +1,7 @@ #!/usr/bin/env bash set -eo pipefail -HOME_DIR=${XDG_CONFIG_HOME:-$HOME} -LOCAL_DIR=${LOCAL_DIR:-"$HOME_DIR/.local"} +LOCAL_DIR="$HOME/.local/" BIN_DIR="$LOCAL_DIR/bin" BINS=()
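
As a reading aid (not part of the patch itself), the `--tag` plumbing introduced above boils down to the following minimal Rust sketch. It assumes only what the diff shows: the image constants in `consts.rs` no longer carry a tag, the CLI flag defaults to `latest2.0`, and the tag is appended to the image name before `docker run`. The helper name `resolve_image` and the example tag value are illustrative, not part of the actual `zkstack` API.

```rust
// Illustrative sketch: how an optional --tag value falls back to "latest2.0"
// and is combined with an untagged image name, mirroring the run.rs changes.
fn resolve_image(image_name: &str, tag: Option<&str>) -> String {
    let tag = tag.unwrap_or("latest2.0");
    format!("{image_name}:{tag}")
}

fn main() {
    // No tag supplied: falls back to the default.
    assert_eq!(
        resolve_image("matterlabs/witness-generator", None),
        "matterlabs/witness-generator:latest2.0"
    );
    // Explicit tag supplied via `--tag` (hypothetical value for illustration).
    assert_eq!(
        resolve_image("matterlabs/witness-generator", Some("2.0-abc123")),
        "matterlabs/witness-generator:2.0-abc123"
    );
    println!("image resolution sketch OK");
}
```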