From f29fd12e6f84e699cc0f0ba0e189d556fce12485 Mon Sep 17 00:00:00 2001
From: tomg10
Date: Wed, 16 Oct 2024 01:13:31 +0200
Subject: [PATCH] better handling of failed L1 transactions

---
 core/lib/circuit_breaker/src/l1_txs.rs        | 31 -------
 core/lib/circuit_breaker/src/lib.rs           |  1 -
 ...ec427a099492905a1feee512dc43f39d10047.json | 15 ----
 ...91a8d67abcdb743a619ed0d1b9c16206a3c20.json | 12 ---
 ...8c5de2c9f8f3cf22d0f1145ae67879039e28d.json | 56 -------------
 ...76fb01e6629e8c982c265f2af1d5000090572.json | 20 -----
 core/lib/dal/src/eth_sender_dal.rs            | 83 +++++--------------
 core/lib/dal/src/models/storage_eth_tx.rs     | 29 +------
 core/lib/types/src/eth_sender.rs              | 11 ---
 core/node/block_reverter/src/lib.rs           | 15 ++++
 core/node/eth_sender/src/eth_tx_aggregator.rs | 57 +++++++------
 core/node/eth_sender/src/eth_tx_manager.rs    | 15 +++-
 .../layers/eth_sender/aggregator.rs           | 12 +--
 .../layers/eth_sender/manager.rs              | 12 +--
 14 files changed, 85 insertions(+), 284 deletions(-)
 delete mode 100644 core/lib/circuit_breaker/src/l1_txs.rs
 delete mode 100644 core/lib/dal/.sqlx/query-532a80b0873871896dd318beba5ec427a099492905a1feee512dc43f39d10047.json
 delete mode 100644 core/lib/dal/.sqlx/query-6b327df84d2b3b31d02db35fd5d91a8d67abcdb743a619ed0d1b9c16206a3c20.json
 delete mode 100644 core/lib/dal/.sqlx/query-8de8fd9aa54e2285a14daf95af18c5de2c9f8f3cf22d0f1145ae67879039e28d.json
 delete mode 100644 core/lib/dal/.sqlx/query-dc16d0fac093a52480b66dfcb5976fb01e6629e8c982c265f2af1d5000090572.json

diff --git a/core/lib/circuit_breaker/src/l1_txs.rs b/core/lib/circuit_breaker/src/l1_txs.rs
deleted file mode 100644
index 3236bd986312..000000000000
--- a/core/lib/circuit_breaker/src/l1_txs.rs
+++ /dev/null
@@ -1,31 +0,0 @@
-use anyhow::Context as _;
-use zksync_dal::{ConnectionPool, Core, CoreDal};
-
-use crate::{CircuitBreaker, CircuitBreakerError};
-
-#[derive(Debug)]
-pub struct FailedL1TransactionChecker {
-    pub pool: ConnectionPool<Core>,
-}
-
-#[async_trait::async_trait]
-impl CircuitBreaker for FailedL1TransactionChecker {
-    fn name(&self) -> &'static str {
-        "failed_l1_transaction"
-    }
-
-    async fn check(&self) -> Result<(), CircuitBreakerError> {
-        let number_of_failed_transactions = self
-            .pool
-            .connection_tagged("circuit_breaker")
-            .await?
-            .eth_sender_dal()
-            .get_number_of_failed_transactions()
-            .await
-            .context("cannot get number of failed L1 transactions")?;
-        if number_of_failed_transactions > 0 {
-            return Err(CircuitBreakerError::FailedL1Transaction);
-        }
-        Ok(())
-    }
-}
diff --git a/core/lib/circuit_breaker/src/lib.rs b/core/lib/circuit_breaker/src/lib.rs
index adb56db1f3e5..d8ae28802a86 100644
--- a/core/lib/circuit_breaker/src/lib.rs
+++ b/core/lib/circuit_breaker/src/lib.rs
@@ -3,7 +3,6 @@ use std::{fmt, sync::Arc, time::Duration};
 use thiserror::Error;
 use tokio::sync::{watch, Mutex};
 
-pub mod l1_txs;
 mod metrics;
 pub mod replication_lag;
 
diff --git a/core/lib/dal/.sqlx/query-532a80b0873871896dd318beba5ec427a099492905a1feee512dc43f39d10047.json b/core/lib/dal/.sqlx/query-532a80b0873871896dd318beba5ec427a099492905a1feee512dc43f39d10047.json
deleted file mode 100644
index 629dca2ea7f0..000000000000
--- a/core/lib/dal/.sqlx/query-532a80b0873871896dd318beba5ec427a099492905a1feee512dc43f39d10047.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n UPDATE eth_txs_history\n SET\n sent_at_block = $2,\n sent_at = NOW()\n WHERE\n id = $1\n AND sent_at_block IS NULL\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Left": [
-        "Int4",
-        "Int4"
-      ]
-    },
-    "nullable": []
-  },
-  "hash": "532a80b0873871896dd318beba5ec427a099492905a1feee512dc43f39d10047"
-}
diff --git a/core/lib/dal/.sqlx/query-6b327df84d2b3b31d02db35fd5d91a8d67abcdb743a619ed0d1b9c16206a3c20.json b/core/lib/dal/.sqlx/query-6b327df84d2b3b31d02db35fd5d91a8d67abcdb743a619ed0d1b9c16206a3c20.json
deleted file mode 100644
index d00622a1f5fa..000000000000
--- a/core/lib/dal/.sqlx/query-6b327df84d2b3b31d02db35fd5d91a8d67abcdb743a619ed0d1b9c16206a3c20.json
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n DELETE FROM eth_txs\n WHERE\n id >= (\n SELECT\n MIN(id)\n FROM\n eth_txs\n WHERE\n has_failed = TRUE\n )\n ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Left": []
-    },
-    "nullable": []
-  },
-  "hash": "6b327df84d2b3b31d02db35fd5d91a8d67abcdb743a619ed0d1b9c16206a3c20"
-}
diff --git a/core/lib/dal/.sqlx/query-8de8fd9aa54e2285a14daf95af18c5de2c9f8f3cf22d0f1145ae67879039e28d.json b/core/lib/dal/.sqlx/query-8de8fd9aa54e2285a14daf95af18c5de2c9f8f3cf22d0f1145ae67879039e28d.json
deleted file mode 100644
index 36ecf511a9ae..000000000000
--- a/core/lib/dal/.sqlx/query-8de8fd9aa54e2285a14daf95af18c5de2c9f8f3cf22d0f1145ae67879039e28d.json
+++ /dev/null
@@ -1,56 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n SELECT\n eth_txs_history.id,\n eth_txs_history.eth_tx_id,\n eth_txs_history.tx_hash,\n eth_txs_history.base_fee_per_gas,\n eth_txs_history.priority_fee_per_gas,\n eth_txs_history.signed_raw_tx,\n eth_txs.nonce\n FROM\n eth_txs_history\n JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id\n WHERE\n eth_txs_history.sent_at_block IS NULL\n AND eth_txs.confirmed_eth_tx_history_id IS NULL\n ORDER BY\n eth_txs_history.id DESC\n ",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "id",
-        "type_info": "Int4"
-      },
-      {
-        "ordinal": 1,
-        "name": "eth_tx_id",
-        "type_info": "Int4"
-      },
-      {
-        "ordinal": 2,
-        "name": "tx_hash",
-        "type_info": "Text"
-      },
-      {
-        "ordinal": 3,
-        "name": "base_fee_per_gas",
-        "type_info": "Int8"
-      },
-      {
-        "ordinal": 4,
-        "name": "priority_fee_per_gas",
-        "type_info": "Int8"
-      },
-      {
-        "ordinal": 5,
-        "name": "signed_raw_tx",
-        "type_info": "Bytea"
-      },
-      {
-        "ordinal": 6,
-        "name": "nonce",
-        "type_info": "Int8"
-      }
-    ],
-    "parameters": {
-      "Left": []
-    },
-    "nullable": [
-      false,
-      false,
-      false,
-      false,
-      false,
-      true,
-      false
-    ]
-  },
-  "hash": "8de8fd9aa54e2285a14daf95af18c5de2c9f8f3cf22d0f1145ae67879039e28d"
-}
diff --git a/core/lib/dal/.sqlx/query-dc16d0fac093a52480b66dfcb5976fb01e6629e8c982c265f2af1d5000090572.json b/core/lib/dal/.sqlx/query-dc16d0fac093a52480b66dfcb5976fb01e6629e8c982c265f2af1d5000090572.json
deleted file mode 100644
index 9669622f5cf2..000000000000
--- a/core/lib/dal/.sqlx/query-dc16d0fac093a52480b66dfcb5976fb01e6629e8c982c265f2af1d5000090572.json
+++ /dev/null
@@ -1,20 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "SELECT COUNT(miniblocks.number) FROM miniblocks WHERE l1_batch_number IS NULL",
-  "describe": {
-    "columns": [
-      {
-        "ordinal": 0,
-        "name": "count",
-        "type_info": "Int8"
-      }
-    ],
-    "parameters": {
-      "Left": []
-    },
-    "nullable": [
-      null
-    ]
-  },
-  "hash": "dc16d0fac093a52480b66dfcb5976fb01e6629e8c982c265f2af1d5000090572"
-}
diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs
index 4ce76547ac9b..27b37e6a123c 100644
--- a/core/lib/dal/src/eth_sender_dal.rs
+++ b/core/lib/dal/src/eth_sender_dal.rs
@@ -5,14 +5,12 @@ use sqlx::types::chrono::{DateTime, Utc};
 use zksync_db_connection::{connection::Connection, interpolate_query, match_query_as};
 use zksync_types::{
     aggregated_operations::AggregatedActionType,
-    eth_sender::{EthTx, EthTxBlobSidecar, TxHistory, TxHistoryToSend},
+    eth_sender::{EthTx, EthTxBlobSidecar, TxHistory},
     Address, L1BatchNumber, H256, U256,
 };
 
 use crate::{
-    models::storage_eth_tx::{
-        L1BatchEthSenderStats, StorageEthTx, StorageTxHistory, StorageTxHistoryToSend,
-    },
+    models::storage_eth_tx::{L1BatchEthSenderStats, StorageEthTx, StorageTxHistory},
     Core,
 };
 
@@ -194,33 +192,6 @@ impl EthSenderDal<'_, '_> {
         Ok(txs.into_iter().map(|tx| tx.into()).collect())
     }
 
-    pub async fn get_unsent_txs(&mut self) -> sqlx::Result<Vec<TxHistoryToSend>> {
-        let txs = sqlx::query_as!(
-            StorageTxHistoryToSend,
-            r#"
-            SELECT
-                eth_txs_history.id,
-                eth_txs_history.eth_tx_id,
-                eth_txs_history.tx_hash,
-                eth_txs_history.base_fee_per_gas,
-                eth_txs_history.priority_fee_per_gas,
-                eth_txs_history.signed_raw_tx,
-                eth_txs.nonce
-            FROM
-                eth_txs_history
-                JOIN eth_txs ON eth_txs.id = eth_txs_history.eth_tx_id
-            WHERE
-                eth_txs_history.sent_at_block IS NULL
-                AND eth_txs.confirmed_eth_tx_history_id IS NULL
-            ORDER BY
-                eth_txs_history.id DESC
-            "#,
-        )
-        .fetch_all(self.storage.conn())
-        .await?;
-        Ok(txs.into_iter().map(|tx| tx.into()).collect())
-    }
-
     #[allow(clippy::too_many_arguments)]
     pub async fn save_eth_tx(
         &mut self,
@@ -321,29 +292,6 @@ impl EthSenderDal<'_, '_> {
             .map(|row| row.id as u32))
     }
 
-    pub async fn set_sent_at_block(
-        &mut self,
-        eth_txs_history_id: u32,
-        sent_at_block: u32,
-    ) -> sqlx::Result<()> {
-        sqlx::query!(
-            r#"
-            UPDATE eth_txs_history
-            SET
-                sent_at_block = $2,
-                sent_at = NOW()
-            WHERE
-                id = $1
-                AND sent_at_block IS NULL
-            "#,
-            eth_txs_history_id as i32,
-            sent_at_block as i32
-        )
-        .execute(self.storage.conn())
-        .await?;
-        Ok(())
-    }
-
     pub async fn remove_tx_history(&mut self, eth_txs_history_id: u32) -> sqlx::Result<()> {
         sqlx::query!(
             r#"
@@ -690,15 +638,7 @@ impl EthSenderDal<'_, '_> {
         sqlx::query!(
             r#"
             DELETE FROM eth_txs
-            WHERE
-                id >= (
-                    SELECT
-                        MIN(id)
-                    FROM
-                        eth_txs
-                    WHERE
-                        has_failed = TRUE
-                )
+            WHERE has_failed = TRUE
             "#
         )
         .execute(self.storage.conn())
         .await?;
         Ok(())
     }
 
+    pub async fn count_all_inflight_txs(&mut self) -> anyhow::Result<i64> {
+        sqlx::query!(
+            r#"
+            SELECT
+                COUNT(*)
+            FROM
+                eth_txs
+            WHERE
+                confirmed_eth_tx_history_id IS NULL AND has_failed = FALSE
+            "#
+        )
+        .fetch_one(self.storage.conn())
+        .await?
+        .count
+        .context("count field is missing")
+    }
+
     pub async fn delete_eth_txs(&mut self, last_batch_to_keep: L1BatchNumber) -> sqlx::Result<()> {
         sqlx::query!(
             r#"
diff --git a/core/lib/dal/src/models/storage_eth_tx.rs b/core/lib/dal/src/models/storage_eth_tx.rs
index a47f6acfff46..6f8d352b05e7 100644
--- a/core/lib/dal/src/models/storage_eth_tx.rs
+++ b/core/lib/dal/src/models/storage_eth_tx.rs
@@ -3,7 +3,7 @@ use std::str::FromStr;
 use sqlx::types::chrono::NaiveDateTime;
 use zksync_types::{
     aggregated_operations::AggregatedActionType,
-    eth_sender::{EthTx, TxHistory, TxHistoryToSend},
+    eth_sender::{EthTx, TxHistory},
     Address, L1BatchNumber, Nonce, SLChainId, H256,
 };
 
@@ -39,17 +39,6 @@ pub struct L1BatchEthSenderStats {
     pub mined: Vec<(AggregatedActionType, L1BatchNumber)>,
 }
 
-#[derive(Clone, Debug)]
-pub struct StorageTxHistoryToSend {
-    pub id: i32,
-    pub eth_tx_id: i32,
-    pub tx_hash: String,
-    pub priority_fee_per_gas: i64,
-    pub base_fee_per_gas: i64,
-    pub signed_raw_tx: Option<Vec<u8>>,
-    pub nonce: i64,
-}
-
 #[derive(Clone, Debug)]
 pub struct StorageTxHistory {
     pub id: i32,
@@ -110,19 +99,3 @@ impl From<StorageTxHistory> for TxHistory {
         }
     }
 }
-
-impl From<StorageTxHistoryToSend> for TxHistoryToSend {
-    fn from(history: StorageTxHistoryToSend) -> TxHistoryToSend {
-        TxHistoryToSend {
-            id: history.id as u32,
-            eth_tx_id: history.eth_tx_id as u32,
-            tx_hash: H256::from_str(&history.tx_hash).expect("Incorrect hash"),
-            base_fee_per_gas: history.base_fee_per_gas as u64,
-            priority_fee_per_gas: history.priority_fee_per_gas as u64,
-            signed_raw_tx: history
-                .signed_raw_tx
-                .expect("Should rely only on the new txs"),
-            nonce: Nonce(history.nonce as u32),
-        }
-    }
-}
diff --git a/core/lib/types/src/eth_sender.rs b/core/lib/types/src/eth_sender.rs
index 12a5a5a8fb13..d02057bc88aa 100644
--- a/core/lib/types/src/eth_sender.rs
+++ b/core/lib/types/src/eth_sender.rs
@@ -82,14 +82,3 @@ pub struct TxHistory {
     pub signed_raw_tx: Vec<u8>,
     pub sent_at_block: Option<u32>,
 }
-
-#[derive(Clone, Debug)]
-pub struct TxHistoryToSend {
-    pub id: u32,
-    pub eth_tx_id: u32,
-    pub base_fee_per_gas: u64,
-    pub priority_fee_per_gas: u64,
-    pub tx_hash: H256,
-    pub signed_raw_tx: Vec<u8>,
-    pub nonce: Nonce,
-}
diff --git a/core/node/block_reverter/src/lib.rs b/core/node/block_reverter/src/lib.rs
index c7397ee475a7..3ea7ff9db590 100644
--- a/core/node/block_reverter/src/lib.rs
+++ b/core/node/block_reverter/src/lib.rs
@@ -606,6 +606,21 @@ impl BlockReverter {
     /// Clears failed L1 transactions.
     pub async fn clear_failed_l1_transactions(&self) -> anyhow::Result<()> {
         tracing::info!("Clearing failed L1 transactions");
+        if self
+            .connection_pool
+            .connection()
+            .await?
+            .eth_sender_dal()
+            .count_all_inflight_txs()
+            .await
+            .unwrap()
+            != 0
+        {
+            anyhow::bail!(
+                "There are still some in-flight txs, cannot proceed. \
+                 Please wait for eth-sender to process all in-flight txs and try again!"
+            );
+        }
         self.connection_pool
             .connection()
             .await?
diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs b/core/node/eth_sender/src/eth_tx_aggregator.rs
index a08d16f456a9..a20bb1b8a2d4 100644
--- a/core/node/eth_sender/src/eth_tx_aggregator.rs
+++ b/core/node/eth_sender/src/eth_tx_aggregator.rs
@@ -54,8 +54,6 @@ pub struct EthTxAggregator {
     l1_multicall3_address: Address,
     pub(super) state_transition_chain_contract: Address,
     functions: ZkSyncFunctions,
-    base_nonce: u64,
-    base_nonce_custom_commit_sender: Option<u64>,
     rollup_chain_id: L2ChainId,
     /// If set to `Some` node is operating in the 4844 mode with two operator
     /// addresses at play: the main one and the custom address for sending commit
@@ -88,19 +86,6 @@ impl EthTxAggregator {
     ) -> Self {
         let eth_client = eth_client.for_component("eth_tx_aggregator");
         let functions = ZkSyncFunctions::default();
-        let base_nonce = eth_client.pending_nonce().await.unwrap().as_u64();
-
-        let base_nonce_custom_commit_sender = match custom_commit_sender_addr {
-            Some(addr) => Some(
-                (*eth_client)
-                    .as_ref()
-                    .nonce_at_for_account(addr, BlockNumber::Pending)
-                    .await
-                    .unwrap()
-                    .as_u64(),
-            ),
-            None => None,
-        };
 
         let sl_chain_id = (*eth_client).as_ref().fetch_chain_id().await.unwrap();
 
@@ -112,8 +97,6 @@ impl EthTxAggregator {
             l1_multicall3_address,
             state_transition_chain_contract,
             functions,
-            base_nonce,
-            base_nonce_custom_commit_sender,
             rollup_chain_id,
             custom_commit_sender_addr,
             pool,
@@ -402,6 +385,24 @@ impl EthTxAggregator {
             )
             .await
         {
+            if storage
+                .eth_sender_dal()
+                .get_number_of_failed_transactions()
+                .await
+                .unwrap()
+                != 0
+            {
+                tracing::info!(
+                    "Skipping sending operation of type {} for batches {}-{} \
+                    as there are failed transactions in the database; they can be removed using \
+                    the block-reverter tool with the clear-failed-transactions option",
+                    agg_op.get_action_type(),
+                    agg_op.l1_batch_range().start(),
+                    agg_op.l1_batch_range().end()
+                );
+                return Ok(());
+            }
+
             if self.config.tx_aggregation_paused {
                 tracing::info!(
                     "Skipping sending operation of type {} for batches {}-{} \
@@ -658,15 +659,23 @@ impl EthTxAggregator {
             .await
             .unwrap()
             .unwrap_or(0);
-        // Between server starts we can execute some txs using operator account or remove some txs from the database
-        // At the start we have to consider this fact and get the max nonce.
+        // Txs may be executed using the operator account or removed from the database outside of this method,
+        // so we have to take this into account and use the max of the DB nonce and the on-chain nonce.
         Ok(if from_addr.is_none() {
-            db_nonce.max(self.base_nonce)
+            let base_nonce = self.eth_client.pending_nonce().await.unwrap().as_u64();
+            db_nonce.max(base_nonce)
         } else {
-            db_nonce.max(
-                self.base_nonce_custom_commit_sender
-                    .expect("custom base nonce is expected to be initialized; qed"),
-            )
+            let base_nonce_custom_commit_sender = (*self.eth_client)
+                .as_ref()
+                .nonce_at_for_account(
+                    self.custom_commit_sender_addr
+                        .expect("custom_commit_sender_addr should not be empty"),
+                    BlockNumber::Pending,
+                )
+                .await
+                .unwrap()
+                .as_u64();
+            db_nonce.max(base_nonce_custom_commit_sender)
         })
     }
 }
diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs
index 7de91a3b7736..6879b87b6bd6 100644
--- a/core/node/eth_sender/src/eth_tx_manager.rs
+++ b/core/node/eth_sender/src/eth_tx_manager.rs
@@ -461,7 +461,6 @@ impl EthTxManager {
             tx_status.receipt,
             failure_reason
         );
-        panic!("We can't operate after tx fail");
     }
 
     pub async fn confirm_tx(
@@ -544,6 +543,20 @@ impl EthTxManager {
         current_block: L1BlockNumber,
         operator_type: OperatorType,
     ) {
+        // If there are failed transactions, we don't want to send any new ones, but we also don't
+        // panic, so that all in-flight txs can still be processed (and most likely marked as failed).
+        // This makes absolutely sure that we never remove transactions from the database that are
+        // already confirmed on L1 but not yet marked as such in our database.
+        if storage
+            .eth_sender_dal()
+            .get_number_of_failed_transactions()
+            .await
+            .unwrap()
+            != 0
+        {
+            tracing::warn!("Skipping sending new transactions because of failed transactions");
+            return;
+        }
         let number_inflight_txs = storage
             .eth_sender_dal()
             .get_inflight_txs(
diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs b/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs
index 310580aeb3a3..a39cf7226966 100644
--- a/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs
+++ b/core/node/node_framework/src/implementations/layers/eth_sender/aggregator.rs
@@ -1,5 +1,4 @@
 use anyhow::Context;
-use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker;
 use zksync_config::configs::{eth_sender::EthConfig, ContractsConfig};
 use zksync_eth_client::BoundEthInterface;
 use zksync_eth_sender::{Aggregator, EthTxAggregator};
@@ -10,7 +9,7 @@ use crate::{
         circuit_breakers::CircuitBreakersResource,
         eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource},
         object_store::ObjectStoreResource,
-        pools::{MasterPool, PoolResource, ReplicaPool},
+        pools::{MasterPool, PoolResource},
     },
     service::StopReceiver,
     task::{Task, TaskId},
@@ -49,7 +48,6 @@ pub struct EthTxAggregatorLayer {
 #[context(crate = crate)]
 pub struct Input {
     pub master_pool: PoolResource<MasterPool>,
-    pub replica_pool: PoolResource<ReplicaPool>,
     pub eth_client: Option<BoundEthInterfaceResource>,
     pub eth_client_blobs: Option<BoundEthInterfaceForBlobsResource>,
     pub object_store: ObjectStoreResource,
@@ -94,7 +92,6 @@ impl WiringLayer for EthTxAggregatorLayer {
     async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
         // Get resources.
         let master_pool = input.master_pool.get().await.unwrap();
-        let replica_pool = input.replica_pool.get().await.unwrap();
         let eth_client_blobs = input.eth_client_blobs.map(|c| c.0);
         let object_store = input.object_store.0;
 
@@ -126,13 +123,6 @@ impl WiringLayer for EthTxAggregatorLayer {
         )
         .await;
 
-        // Insert circuit breaker.
-        input
-            .circuit_breakers
-            .breakers
-            .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool }))
-            .await;
-
         Ok(Output { eth_tx_aggregator })
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs
index 5462fa575f94..7956efa8b45d 100644
--- a/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs
+++ b/core/node/node_framework/src/implementations/layers/eth_sender/manager.rs
@@ -1,5 +1,4 @@
 use anyhow::Context;
-use zksync_circuit_breaker::l1_txs::FailedL1TransactionChecker;
 use zksync_config::configs::eth_sender::EthConfig;
 use zksync_eth_sender::EthTxManager;
 
@@ -8,7 +7,7 @@ use crate::{
         circuit_breakers::CircuitBreakersResource,
         eth_interface::{BoundEthInterfaceForBlobsResource, BoundEthInterfaceResource},
         gas_adjuster::GasAdjusterResource,
-        pools::{MasterPool, PoolResource, ReplicaPool},
+        pools::{MasterPool, PoolResource},
     },
     service::StopReceiver,
     task::{Task, TaskId},
@@ -42,7 +41,6 @@ pub struct EthTxManagerLayer {
 #[context(crate = crate)]
 pub struct Input {
     pub master_pool: PoolResource<MasterPool>,
-    pub replica_pool: PoolResource<ReplicaPool>,
     pub eth_client: BoundEthInterfaceResource,
     pub eth_client_blobs: Option<BoundEthInterfaceForBlobsResource>,
     pub gas_adjuster: GasAdjusterResource,
@@ -75,7 +73,6 @@ impl WiringLayer for EthTxManagerLayer {
     async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
         // Get resources.
         let master_pool = input.master_pool.get().await.unwrap();
-        let replica_pool = input.replica_pool.get().await.unwrap();
         let settlement_mode = self.eth_sender_config.gas_adjuster.unwrap().settlement_mode;
         let eth_client = input.eth_client.0.clone();
 
@@ -107,13 +104,6 @@ impl WiringLayer for EthTxManagerLayer {
             },
         );
 
-        // Insert circuit breaker.
-        input
-            .circuit_breakers
-            .breakers
-            .insert(Box::new(FailedL1TransactionChecker { pool: replica_pool }))
-            .await;
-
         Ok(Output { eth_tx_manager })
     }
 }
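
Below is a minimal illustrative sketch (not part of the patch) of the recovery flow this change enables: eth-sender stops submitting once a failed tx is recorded, the operator waits until no txs are in flight, and only then clears the failed rows. Only `count_all_inflight_txs`, `get_number_of_failed_transactions`, and `delete_failed_transactions` come from the DAL touched above; the helper name, the wrapper function, and its error messages are assumptions for illustration.

```rust
use zksync_dal::{ConnectionPool, Core, CoreDal};

/// Hypothetical operator-side helper mirroring the guard added in this patch:
/// failed L1 txs may only be cleared once eth-sender has drained all in-flight txs.
async fn clear_failed_txs_when_safe(pool: &ConnectionPool<Core>) -> anyhow::Result<()> {
    let mut conn = pool.connection().await?;

    // Same precondition as `BlockReverter::clear_failed_l1_transactions`:
    // refuse to touch tx history while anything is still in flight.
    if conn.eth_sender_dal().count_all_inflight_txs().await? != 0 {
        anyhow::bail!("in-flight txs present; wait for eth-sender to process them and retry");
    }

    if conn.eth_sender_dal().get_number_of_failed_transactions().await? > 0 {
        // With this patch, only rows with `has_failed = TRUE` are deleted,
        // instead of everything starting from the first failed tx.
        conn.eth_sender_dal().delete_failed_transactions().await?;
    }
    Ok(())
}
```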