Deneb publish and verification optimisations #4736

Closed
wants to merge 3 commits
9 changes: 7 additions & 2 deletions beacon_node/beacon_chain/src/blob_verification.rs
@@ -520,7 +520,7 @@ pub fn verify_kzg_for_blob<T: EthSpec>(
 pub fn verify_kzg_for_blob_list<T: EthSpec>(
     blob_list: &BlobSidecarList<T>,
     kzg: &Kzg<T::Kzg>,
-) -> Result<(), AvailabilityCheckError> {
+) -> Result<Vec<KzgVerifiedBlob<T>>, AvailabilityCheckError> {
     let _timer = crate::metrics::start_timer(&crate::metrics::KZG_VERIFICATION_BATCH_TIMES);
     let (blobs, (commitments, proofs)): (Vec<_>, (Vec<_>, Vec<_>)) = blob_list
         .iter()
Expand All @@ -534,7 +534,12 @@ pub fn verify_kzg_for_blob_list<T: EthSpec>(
)
.map_err(AvailabilityCheckError::Kzg)?
{
Ok(())
Ok(blob_list
.into_iter()
.map(|blob_sidecar| KzgVerifiedBlob {
blob: blob_sidecar.clone(),
})
.collect())
} else {
Err(AvailabilityCheckError::KzgVerificationFailed)
}
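Context for the change above: `verify_kzg_for_blob_list` now hands back the `KzgVerifiedBlob` wrappers on success instead of `()`, so a caller that batch-verifies a list no longer has to re-verify each blob individually just to obtain the verified type. Below is a minimal, self-contained sketch of that pattern; `Blob`, `VerifiedBlob`, `VerifyError`, and `batch_verify` are simplified stand-ins, not the Lighthouse types.

```rust
// Illustrative stand-ins; the real types live in Lighthouse's blob_verification module.
#[derive(Clone, Debug)]
struct Blob {
    index: u64,
    data: Vec<u8>,
}

/// Wrapper proving that a blob passed the (stubbed) batch verification.
#[derive(Debug)]
struct VerifiedBlob {
    blob: Blob,
}

#[derive(Debug)]
enum VerifyError {
    VerificationFailed,
}

/// Verify the whole list in one batch and, on success, hand every blob back
/// wrapped in `VerifiedBlob`, so callers can reuse the proof of verification.
fn batch_verify(blobs: &[Blob]) -> Result<Vec<VerifiedBlob>, VerifyError> {
    // Stand-in for the real batched KZG proof check.
    let all_valid = blobs.iter().all(|b| !b.data.is_empty());
    if all_valid {
        Ok(blobs.iter().map(|b| VerifiedBlob { blob: b.clone() }).collect())
    } else {
        Err(VerifyError::VerificationFailed)
    }
}

fn main() {
    let blobs = vec![Blob { index: 0, data: vec![1, 2, 3] }];
    match batch_verify(&blobs) {
        Ok(verified) => println!("verified {} blobs in one batch", verified.len()),
        Err(e) => println!("batch verification failed: {e:?}"),
    }
}
```

Returning the wrappers from the batch path keeps the "already verified" evidence in the type system while still paying for only one batched KZG check.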
16 changes: 9 additions & 7 deletions beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -197,16 +197,18 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
         block_root: Hash256,
         blobs: FixedBlobSidecarList<T::EthSpec>,
     ) -> Result<Availability<T::EthSpec>, AvailabilityCheckError> {
-        let mut verified_blobs = vec![];
         if let Some(kzg) = self.kzg.as_ref() {
-            for blob in blobs.iter().flatten() {
-                verified_blobs.push(verify_kzg_for_blob(blob.clone(), kzg)?)
-            }
+            let blob_list: BlobSidecarList<T::EthSpec> = blobs
+                .into_iter()
+                .flat_map(|blob| blob.clone())
+                .collect::<Vec<_>>()
+                .into();
+            let verified_blob_list = verify_kzg_for_blob_list(&blob_list, kzg)?;
+            self.availability_cache
+                .put_kzg_verified_blobs(block_root, verified_blob_list)
         } else {
             return Err(AvailabilityCheckError::KzgNotInitialized);
-        };
-        self.availability_cache
-            .put_kzg_verified_blobs(block_root, verified_blobs)
+        }
     }
 
     /// This first validates the KZG commitments included in the blob sidecar.
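The diff above replaces the per-blob `verify_kzg_for_blob` loop with a single call to `verify_kzg_for_blob_list`: the fixed-size list of optional blob slots is flattened into a contiguous list first, then verified in one batch. A tiny sketch of that flattening step, using plain `Option` values and a hypothetical `flatten_blobs` helper in place of the real sidecar types:

```rust
// Hypothetical miniature of converting a fixed-capacity list of optional blob
// slots (one slot per possible blob index) into the contiguous list a batch
// verifier expects.
fn flatten_blobs(fixed: Vec<Option<String>>) -> Vec<String> {
    // Drop the empty slots and keep the present blobs in index order.
    fixed.into_iter().flatten().collect()
}

fn main() {
    let fixed = vec![Some("blob-0".to_string()), None, Some("blob-2".to_string())];
    let contiguous = flatten_blobs(fixed);
    assert_eq!(contiguous, vec!["blob-0".to_string(), "blob-2".to_string()]);
    println!("{} blobs ready for one batch verification", contiguous.len());
}
```

One batched check over the contiguous list is cheaper than running the full verification once per blob, which is the optimisation the PR title refers to.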
8 changes: 8 additions & 0 deletions beacon_node/http_api/src/lib.rs
@@ -4705,6 +4705,14 @@ fn publish_pubsub_message<T: EthSpec>(
     )
 }
 
+/// Publish a batch of messages to the libp2p pubsub network.
+fn publish_pubsub_messages<T: EthSpec>(
+    network_tx: &UnboundedSender<NetworkMessage<T>>,
+    messages: Vec<PubsubMessage<T>>,
+) -> Result<(), warp::Rejection> {
+    publish_network_message(network_tx, NetworkMessage::Publish { messages })
+}
+
 /// Publish a message to the libp2p network.
 fn publish_network_message<T: EthSpec>(
     network_tx: &UnboundedSender<NetworkMessage<T>>,
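The new `publish_pubsub_messages` helper wraps a whole batch of gossip messages in a single `NetworkMessage::Publish { messages }` and performs one channel send, instead of one send per message. A minimal sketch of the same shape, assuming simplified message enums and using `std::sync::mpsc` in place of the tokio `UnboundedSender` used in the real code:

```rust
use std::sync::mpsc;

// Simplified stand-ins for the network message types; the real ones carry
// typed gossip topics and SSZ payloads.
#[derive(Debug)]
enum PubsubMessage {
    BeaconBlock(String),
    BlobSidecar(u64, String),
}

#[derive(Debug)]
enum NetworkMessage {
    Publish { messages: Vec<PubsubMessage> },
}

/// Queue a whole batch of gossip messages with a single channel send,
/// mirroring the shape of the new `publish_pubsub_messages` helper.
fn publish_pubsub_messages(
    network_tx: &mpsc::Sender<NetworkMessage>,
    messages: Vec<PubsubMessage>,
) -> Result<(), mpsc::SendError<NetworkMessage>> {
    network_tx.send(NetworkMessage::Publish { messages })
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let batch = vec![
        PubsubMessage::BeaconBlock("block".into()),
        PubsubMessage::BlobSidecar(0, "blob-0".into()),
    ];
    publish_pubsub_messages(&tx, batch).expect("receiver is alive");
    // The network service receives one event containing the whole batch.
    println!("{:?}", rx.recv().unwrap());
}
```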
16 changes: 7 additions & 9 deletions beacon_node/http_api/src/publish_blocks.rs
@@ -85,19 +85,17 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlockConten
                 .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?;
         }
         SignedBeaconBlock::Deneb(_) => {
-            crate::publish_pubsub_message(&sender, PubsubMessage::BeaconBlock(block.clone()))
-                .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?;
+            let mut pubsub_messages = vec![PubsubMessage::BeaconBlock(block.clone())];
             if let Some(signed_blobs) = blobs_opt {
                 for (blob_index, blob) in signed_blobs.into_iter().enumerate() {
-                    crate::publish_pubsub_message(
-                        &sender,
-                        PubsubMessage::BlobSidecar(Box::new((blob_index as u64, blob))),
-                    )
-                    .map_err(|_| {
-                        BlockError::BeaconChainError(BeaconChainError::UnableToPublish)
-                    })?;
+                    pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new((
+                        blob_index as u64,
+                        blob,
+                    ))));
                 }
             }
+            crate::publish_pubsub_messages(&sender, pubsub_messages)
+                .map_err(|_| BlockError::BeaconChainError(BeaconChainError::UnableToPublish))?;
         }
     };
     Ok(())
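In `publish_block` the Deneb arm now builds the batch up front: the block goes first, then every blob sidecar paired with its index via `enumerate`, and the whole vector is published with one call, so the block and its blobs are queued for gossip together. A compact sketch of that batch construction, with hypothetical stand-in types:

```rust
// Hypothetical miniature of the batching in `publish_block`.
#[derive(Debug)]
enum PubsubMessage {
    BeaconBlock(String),
    BlobSidecar(Box<(u64, String)>),
}

/// Build one batch: the block first, then each blob tagged with its index.
fn build_batch(block: String, blobs_opt: Option<Vec<String>>) -> Vec<PubsubMessage> {
    let mut pubsub_messages = vec![PubsubMessage::BeaconBlock(block)];
    if let Some(signed_blobs) = blobs_opt {
        for (blob_index, blob) in signed_blobs.into_iter().enumerate() {
            pubsub_messages.push(PubsubMessage::BlobSidecar(Box::new((blob_index as u64, blob))));
        }
    }
    pubsub_messages
}

fn main() {
    let batch = build_batch("block".into(), Some(vec!["blob-0".into(), "blob-1".into()]));
    assert_eq!(batch.len(), 3);
    println!("publishing {} messages in one network call", batch.len());
}
```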
13 changes: 11 additions & 2 deletions beacon_node/network/src/metrics.rs
@@ -295,13 +295,22 @@ lazy_static! {
     */
     pub static ref BEACON_BLOB_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
         "beacon_blob_gossip_propagation_verification_delay_time",
-        "Duration between when the blob is received and when it is verified for propagation.",
+        "Duration between when the blob is received over gossip and when it is verified for propagation.",
         // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5]
         decimal_buckets(-3,-1)
     );
     pub static ref BEACON_BLOB_GOSSIP_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
         "beacon_blob_gossip_slot_start_delay_time",
-        "Duration between when the blob is received and the start of the slot it belongs to.",
+        "Duration between when the blob is received over gossip and the start of the slot it belongs to.",
         // Create a custom bucket list for greater granularity in block delay
         Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0])
         // NOTE: Previous values, which we may want to switch back to.
         // [0.1, 0.2, 0.5, 1, 2, 5, 10, 20, 50]
         //decimal_buckets(-1,2)
     );
+    pub static ref BEACON_BLOB_RPC_SLOT_START_DELAY_TIME: Result<Histogram> = try_create_histogram_with_buckets(
+        "beacon_blob_rpc_slot_start_delay_time",
+        "Duration between when a blob is received over rpc and the start of the slot it belongs to.",
+        // Create a custom bucket list for greater granularity in block delay
+        Ok(vec![0.1, 0.2, 0.3,0.4,0.5,0.75,1.0,1.25,1.5,1.75,2.0,2.5,3.0,3.5,4.0,5.0,6.0,7.0,8.0,9.0,10.0,15.0,20.0])
+        // NOTE: Previous values, which we may want to switch back to.
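The `decimal_buckets(-3,-1)` call kept for the propagation-verification metric generates the 1-2-5 series shown in the comment above it, while the slot-start metrics use an explicit bucket list for finer granularity around typical slot timings. An illustrative version of such a 1-2-5 bucket generator follows; this is not Lighthouse's `decimal_buckets` implementation (which returns a `Result`), just a sketch of the series it produces:

```rust
/// Illustrative 1-2-5 bucket generator: for each power of ten in the range it
/// emits 1x, 2x and 5x that power.
fn decimal_buckets(min_pow: i32, max_pow: i32) -> Vec<f64> {
    let mut buckets = Vec::new();
    for pow in min_pow..=max_pow {
        let base = 10f64.powi(pow);
        for mult in [1.0, 2.0, 5.0] {
            buckets.push(mult * base);
        }
    }
    buckets
}

fn main() {
    // Conceptually the series in the comment above
    // BEACON_BLOB_GOSSIP_PROPAGATION_VERIFICATION_DELAY_TIME:
    // [0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5]
    println!("{:?}", decimal_buckets(-3, -1));
}
```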
55 changes: 53 additions & 2 deletions beacon_node/network/src/network_beacon_processor/sync_methods.rs
@@ -9,7 +9,8 @@ use beacon_chain::block_verification_types::{AsBlock, RpcBlock};
 use beacon_chain::data_availability_checker::AvailabilityCheckError;
 use beacon_chain::data_availability_checker::MaybeAvailableBlock;
 use beacon_chain::{
-    observed_block_producers::Error as ObserveError, validator_monitor::get_block_delay_ms,
+    observed_block_producers::Error as ObserveError,
+    validator_monitor::{get_block_delay_ms, get_slot_delay_ms},
     AvailabilityProcessingStatus, BeaconChainError, BeaconChainTypes, BlockError,
     ChainSegmentResult, HistoricalBlockError, NotifyExecutionLayer,
 };
@@ -277,7 +278,7 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
         self: Arc<NetworkBeaconProcessor<T>>,
         block_root: Hash256,
         blobs: FixedBlobSidecarList<T::EthSpec>,
-        _seen_timestamp: Duration,
+        seen_timestamp: Duration,
         process_type: BlockProcessType,
     ) {
         let Some(slot) = blobs
@@ -287,8 +288,58 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
             return;
         };
 
+        let indices: Vec<_> = blobs
+            .iter()
+            .filter_map(|blob_opt| blob_opt.as_ref().map(|blob| blob.index))
+            .collect();
+
+        debug!(
+            self.log,
+            "RPC blobs received";
+            "indices" => ?indices,
+            "block_root" => %block_root,
+            "slot" => %slot,
+        );
+
+        // Note: this metric is useful to gauge how long it takes to receive blobs requested
+        // over rpc. Since we always send the request for block components at `slot_clock.single_lookup_delay()`
+        // we can use that as a baseline to measure against.
+        let delay = get_slot_delay_ms(seen_timestamp, slot, &self.chain.slot_clock)
+            .saturating_sub(self.chain.slot_clock.single_lookup_delay());
+
+        metrics::observe_duration(&metrics::BEACON_BLOB_RPC_SLOT_START_DELAY_TIME, delay);
+
         let result = self.chain.process_rpc_blobs(slot, block_root, blobs).await;
 
+        match &result {
+            Ok(AvailabilityProcessingStatus::Imported(hash)) => {
+                debug!(
+                    self.log,
+                    "Block components retrieved";
+                    "result" => "imported block and blobs",
+                    "slot" => %slot,
+                    "block_hash" => %hash,
+                );
+            }
+            Ok(AvailabilityProcessingStatus::MissingComponents(_, _)) => {
+                warn!(
+                    self.log,
+                    "Missing components over rpc";
+                    "block_hash" => %block_root,
+                    "slot" => %slot,
+                );
+            }
+            Err(e) => {
+                warn!(
+                    self.log,
+                    "Error when importing rpc blobs";
+                    "error" => ?e,
+                    "block_hash" => %block_root,
+                    "slot" => %slot,
+                );
+            }
+        }
+
         // Sync handles these results
         self.send_sync_message(SyncMessage::BlockComponentProcessed {
             process_type,
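The new RPC-blob delay metric in the hunk above is measured against a baseline: blobs are requested at `slot_clock.single_lookup_delay()` after the slot starts, so that interval is subtracted (saturating at zero) from the observed slot-relative delay before the value is recorded. A small self-contained sketch of that computation with `std::time::Duration`; `rpc_blob_delay` is a hypothetical helper, while `get_slot_delay_ms` and `single_lookup_delay` are the Lighthouse functions referenced in the diff:

```rust
use std::time::Duration;

/// Sketch of the delay computation: subtract the fixed request baseline from
/// the observed slot-relative delay, saturating at zero so a blob that arrives
/// before the baseline never underflows.
fn rpc_blob_delay(seen_after_slot_start: Duration, single_lookup_delay: Duration) -> Duration {
    seen_after_slot_start.saturating_sub(single_lookup_delay)
}

fn main() {
    // Blob seen 1.8s into the slot, with requests sent 1.0s into the slot:
    let delay = rpc_blob_delay(Duration::from_millis(1800), Duration::from_millis(1000));
    assert_eq!(delay, Duration::from_millis(800));

    // Arriving earlier than the baseline saturates to zero.
    let early = rpc_blob_delay(Duration::from_millis(400), Duration::from_millis(1000));
    assert_eq!(early, Duration::ZERO);
}
```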