diff --git a/.cargo/config.toml b/.cargo/config.toml index a408305c4d1..dac01630032 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,4 @@ [env] # Set the number of arenas to 16 when using jemalloc. JEMALLOC_SYS_WITH_MALLOC_CONF = "abort_conf:true,narenas:16" + diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d84cd9615aa..81783267ba6 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -117,7 +117,8 @@ use std::sync::Arc; use std::time::Duration; use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterator}; use store::{ - DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, + BlobSidecarListFromRoot, DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, + KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{ShutdownReason, TaskExecutor}; use tokio::sync::mpsc::Receiver; @@ -1147,9 +1148,10 @@ impl BeaconChain { pub fn get_blobs_checking_early_attester_cache( &self, block_root: &Hash256, - ) -> Result, Error> { + ) -> Result, Error> { self.early_attester_cache .get_blobs(*block_root) + .map(Into::into) .map_or_else(|| self.get_blobs(block_root), Ok) } @@ -1240,11 +1242,11 @@ impl BeaconChain { /// /// ## Errors /// May return a database error. - pub fn get_blobs(&self, block_root: &Hash256) -> Result, Error> { - match self.store.get_blobs(block_root)? { - Some(blobs) => Ok(blobs), - None => Ok(BlobSidecarList::default()), - } + pub fn get_blobs( + &self, + block_root: &Hash256, + ) -> Result, Error> { + self.store.get_blobs(block_root).map_err(Error::from) } /// Returns the data columns at the given root, if any. 
diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 6c87deb8260..786b627bb7e 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -400,7 +400,7 @@ pub fn validate_blob_sidecar_for_gossip= T::EthSpec::max_blobs_per_block() as u64 { + if blob_index >= chain.spec.max_blobs_per_block(blob_epoch) { return Err(GossipBlobError::InvalidSubnet { expected: subnet, received: blob_index, diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 420c83081c7..0bf3007e9b0 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -4,11 +4,10 @@ use crate::data_column_verification::{CustodyDataColumn, CustodyDataColumnList}; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::{get_block_root, PayloadVerificationOutcome}; use derivative::Derivative; -use ssz_types::VariableList; use state_processing::ConsensusContext; use std::fmt::{Debug, Formatter}; use std::sync::Arc; -use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; +use types::blob_sidecar::BlobIdentifier; use types::{ BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, @@ -176,23 +175,6 @@ impl RpcBlock { }) } - pub fn new_from_fixed( - block_root: Hash256, - block: Arc>, - blobs: FixedBlobSidecarList, - ) -> Result { - let filtered = blobs - .into_iter() - .filter_map(|b| b.clone()) - .collect::>(); - let blobs = if filtered.is_empty() { - None - } else { - Some(VariableList::from(filtered)) - }; - Self::new(Some(block_root), block, blobs) - } - #[allow(clippy::type_complexity)] pub fn deconstruct( self, diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs 
b/beacon_node/beacon_chain/src/data_availability_checker.rs index f6002ea0ac9..4c5152239c2 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -215,9 +215,12 @@ impl DataAvailabilityChecker { // Note: currently not reporting which specific blob is invalid because we fetch all blobs // from the same peer for both lookup and range sync. - let verified_blobs = - KzgVerifiedBlobList::new(blobs.iter().flatten().cloned(), &self.kzg, seen_timestamp) - .map_err(AvailabilityCheckError::InvalidBlobs)?; + let verified_blobs = KzgVerifiedBlobList::new( + blobs.into_vec().into_iter().flatten(), + &self.kzg, + seen_timestamp, + ) + .map_err(AvailabilityCheckError::InvalidBlobs)?; self.availability_cache .put_kzg_verified_blobs(block_root, verified_blobs, &self.log) @@ -400,14 +403,13 @@ impl DataAvailabilityChecker { blocks: Vec>, ) -> Result>, AvailabilityCheckError> { let mut results = Vec::with_capacity(blocks.len()); - let all_blobs: BlobSidecarList = blocks + let all_blobs = blocks .iter() .filter(|block| self.blobs_required_for_block(block.as_block())) // this clone is cheap as it's cloning an Arc .filter_map(|block| block.blobs().cloned()) .flatten() - .collect::>() - .into(); + .collect::>(); // verify kzg for all blobs at once if !all_blobs.is_empty() { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 5ce023038d2..44148922f48 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -10,13 +10,12 @@ use crate::BeaconChainTypes; use lru::LruCache; use parking_lot::RwLock; use slog::{debug, Logger}; -use ssz_types::FixedVector; use std::num::NonZeroUsize; use std::sync::Arc; use types::blob_sidecar::BlobIdentifier; use types::{ BlobSidecar, 
ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, - Hash256, SignedBeaconBlock, + Hash256, RuntimeFixedVector, RuntimeVariableList, SignedBeaconBlock, }; /// This represents the components of a partially available block @@ -28,7 +27,7 @@ use types::{ #[derive(Clone)] pub struct PendingComponents { pub block_root: Hash256, - pub verified_blobs: FixedVector>, E::MaxBlobsPerBlock>, + pub verified_blobs: RuntimeFixedVector>>, pub verified_data_columns: Vec>, pub executed_block: Option>, pub reconstruction_started: bool, @@ -41,9 +40,7 @@ impl PendingComponents { } /// Returns an immutable reference to the fixed vector of cached blobs. - pub fn get_cached_blobs( - &self, - ) -> &FixedVector>, E::MaxBlobsPerBlock> { + pub fn get_cached_blobs(&self) -> &RuntimeFixedVector>> { &self.verified_blobs } @@ -64,9 +61,7 @@ impl PendingComponents { } /// Returns a mutable reference to the fixed vector of cached blobs. - pub fn get_cached_blobs_mut( - &mut self, - ) -> &mut FixedVector>, E::MaxBlobsPerBlock> { + pub fn get_cached_blobs_mut(&mut self) -> &mut RuntimeFixedVector>> { &mut self.verified_blobs } @@ -138,10 +133,7 @@ impl PendingComponents { /// Blobs are only inserted if: /// 1. The blob entry at the index is empty and no block exists. /// 2. The block exists and its commitment matches the blob's commitment. - pub fn merge_blobs( - &mut self, - blobs: FixedVector>, E::MaxBlobsPerBlock>, - ) { + pub fn merge_blobs(&mut self, blobs: RuntimeFixedVector>>) { for (index, blob) in blobs.iter().cloned().enumerate() { let Some(blob) = blob else { continue }; self.merge_single_blob(index, blob); @@ -185,7 +177,7 @@ impl PendingComponents { /// Blobs that don't match the new block's commitments are evicted. 
pub fn merge_block(&mut self, block: DietAvailabilityPendingExecutedBlock) { self.insert_block(block); - let reinsert = std::mem::take(self.get_cached_blobs_mut()); + let reinsert = self.get_cached_blobs_mut().take(); self.merge_blobs(reinsert); } @@ -237,10 +229,10 @@ impl PendingComponents { } /// Returns an empty `PendingComponents` object with the given block root. - pub fn empty(block_root: Hash256) -> Self { + pub fn empty(block_root: Hash256, max_len: usize) -> Self { Self { block_root, - verified_blobs: FixedVector::default(), + verified_blobs: RuntimeFixedVector::new(vec![None; max_len]), verified_data_columns: vec![], executed_block: None, reconstruction_started: false, @@ -299,7 +291,11 @@ impl PendingComponents { else { return Err(AvailabilityCheckError::Unexpected); }; - (Some(verified_blobs), None) + let max_len = spec.max_blobs_per_block(diet_executed_block.as_block().epoch()) as usize; + ( + Some(RuntimeVariableList::new(verified_blobs, max_len)?), + None, + ) }; let executed_block = recover(diet_executed_block)?; @@ -341,10 +337,7 @@ impl PendingComponents { } if let Some(kzg_verified_data_column) = self.verified_data_columns.first() { - let epoch = kzg_verified_data_column - .as_data_column() - .slot() - .epoch(E::slots_per_epoch()); + let epoch = kzg_verified_data_column.as_data_column().epoch(); return Some(epoch); } @@ -457,7 +450,18 @@ impl DataAvailabilityCheckerInner { kzg_verified_blobs: I, log: &Logger, ) -> Result, AvailabilityCheckError> { - let mut fixed_blobs = FixedVector::default(); + let mut kzg_verified_blobs = kzg_verified_blobs.into_iter().peekable(); + + let Some(epoch) = kzg_verified_blobs + .peek() + .map(|verified_blob| verified_blob.as_blob().epoch()) + else { + // Verified blobs list should be non-empty. 
+ return Err(AvailabilityCheckError::Unexpected); + }; + + let mut fixed_blobs = + RuntimeFixedVector::new(vec![None; self.spec.max_blobs_per_block(epoch) as usize]); for blob in kzg_verified_blobs { if let Some(blob_opt) = fixed_blobs.get_mut(blob.blob_index() as usize) { @@ -471,7 +475,9 @@ impl DataAvailabilityCheckerInner { let mut pending_components = write_lock .pop_entry(&block_root) .map(|(_, v)| v) - .unwrap_or_else(|| PendingComponents::empty(block_root)); + .unwrap_or_else(|| { + PendingComponents::empty(block_root, self.spec.max_blobs_per_block(epoch) as usize) + }); // Merge in the blobs. pending_components.merge_blobs(fixed_blobs); @@ -498,13 +504,24 @@ impl DataAvailabilityCheckerInner { kzg_verified_data_columns: I, log: &Logger, ) -> Result, AvailabilityCheckError> { + let mut kzg_verified_data_columns = kzg_verified_data_columns.into_iter().peekable(); + let Some(epoch) = kzg_verified_data_columns + .peek() + .map(|verified_blob| verified_blob.as_data_column().epoch()) + else { + // Verified data_columns list should be non-empty. + return Err(AvailabilityCheckError::Unexpected); + }; + let mut write_lock = self.critical.write(); // Grab existing entry or create a new entry. let mut pending_components = write_lock .pop_entry(&block_root) .map(|(_, v)| v) - .unwrap_or_else(|| PendingComponents::empty(block_root)); + .unwrap_or_else(|| { + PendingComponents::empty(block_root, self.spec.max_blobs_per_block(epoch) as usize) + }); // Merge in the data columns. 
pending_components.merge_data_columns(kzg_verified_data_columns)?; @@ -581,6 +598,7 @@ impl DataAvailabilityCheckerInner { log: &Logger, ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); + let epoch = executed_block.as_block().epoch(); let block_root = executed_block.import_data.block_root; // register the block to get the diet block @@ -592,7 +610,9 @@ impl DataAvailabilityCheckerInner { let mut pending_components = write_lock .pop_entry(&block_root) .map(|(_, v)| v) - .unwrap_or_else(|| PendingComponents::empty(block_root)); + .unwrap_or_else(|| { + PendingComponents::empty(block_root, self.spec.max_blobs_per_block(epoch) as usize) + }); // Merge in the block. pending_components.merge_block(diet_executed_block); @@ -812,7 +832,8 @@ mod test { info!(log, "done printing kzg commitments"); let gossip_verified_blobs = if let Some((kzg_proofs, blobs)) = maybe_blobs { - let sidecars = BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap(); + let sidecars = + BlobSidecar::build_sidecars(blobs, &block, kzg_proofs, &chain.spec).unwrap(); Vec::from(sidecars) .into_iter() .map(|sidecar| { @@ -945,6 +966,8 @@ mod test { assert_eq!(cache.critical.read().len(), 1); } } + // remove the blob to simulate successful import + cache.remove_pending_components(root); assert!( cache.critical.read().is_empty(), "cache should be empty now that all components available" @@ -1125,7 +1148,7 @@ mod pending_components_tests { use super::*; use crate::block_verification_types::BlockImportData; use crate::eth1_finalization_cache::Eth1FinalizationData; - use crate::test_utils::{generate_rand_block_and_blobs, NumBlobs}; + use crate::test_utils::{generate_rand_block_and_blobs, test_spec, NumBlobs}; use crate::PayloadVerificationOutcome; use fork_choice::PayloadVerificationStatus; use kzg::KzgCommitment; @@ -1141,15 +1164,19 @@ mod pending_components_tests { type Setup = ( SignedBeaconBlock, - FixedVector>>, ::MaxBlobsPerBlock>, - FixedVector>>, 
::MaxBlobsPerBlock>, + RuntimeFixedVector>>>, + RuntimeFixedVector>>>, + usize, ); pub fn pre_setup() -> Setup { let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + let spec = test_spec::(); let (block, blobs_vec) = - generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng); - let mut blobs: FixedVector<_, ::MaxBlobsPerBlock> = FixedVector::default(); + generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng, &spec); + let max_len = spec.max_blobs_per_block(block.epoch()) as usize; + let mut blobs: RuntimeFixedVector>>> = + RuntimeFixedVector::default(max_len); for blob in blobs_vec { if let Some(b) = blobs.get_mut(blob.index as usize) { @@ -1157,10 +1184,8 @@ mod pending_components_tests { } } - let mut invalid_blobs: FixedVector< - Option>>, - ::MaxBlobsPerBlock, - > = FixedVector::default(); + let mut invalid_blobs: RuntimeFixedVector>>> = + RuntimeFixedVector::default(max_len); for (index, blob) in blobs.iter().enumerate() { if let Some(invalid_blob) = blob { let mut blob_copy = invalid_blob.as_ref().clone(); @@ -1169,21 +1194,21 @@ mod pending_components_tests { } } - (block, blobs, invalid_blobs) + (block, blobs, invalid_blobs, max_len) } type PendingComponentsSetup = ( DietAvailabilityPendingExecutedBlock, - FixedVector>, ::MaxBlobsPerBlock>, - FixedVector>, ::MaxBlobsPerBlock>, + RuntimeFixedVector>>, + RuntimeFixedVector>>, ); pub fn setup_pending_components( block: SignedBeaconBlock, - valid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, - invalid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + valid_blobs: RuntimeFixedVector>>>, + invalid_blobs: RuntimeFixedVector>>>, ) -> PendingComponentsSetup { - let blobs = FixedVector::from( + let blobs = RuntimeFixedVector::new( valid_blobs .iter() .map(|blob_opt| { @@ -1193,7 +1218,7 @@ mod pending_components_tests { }) .collect::>(), ); - let invalid_blobs = FixedVector::from( + let invalid_blobs = RuntimeFixedVector::new( invalid_blobs .iter() .map(|blob_opt| { @@ 
-1225,10 +1250,10 @@ mod pending_components_tests { (block.into(), blobs, invalid_blobs) } - pub fn assert_cache_consistent(cache: PendingComponents) { + pub fn assert_cache_consistent(cache: PendingComponents, max_len: usize) { if let Some(cached_block) = cache.get_cached_block() { let cached_block_commitments = cached_block.get_commitments(); - for index in 0..E::max_blobs_per_block() { + for index in 0..max_len { let block_commitment = cached_block_commitments.get(index).copied(); let blob_commitment_opt = cache.get_cached_blobs().get(index).unwrap(); let blob_commitment = blob_commitment_opt.as_ref().map(|b| *b.get_commitment()); @@ -1247,40 +1272,40 @@ mod pending_components_tests { #[test] fn valid_block_invalid_blobs_valid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_block(block_commitments); cache.merge_blobs(random_blobs); cache.merge_blobs(blobs); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } #[test] fn invalid_blobs_block_valid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_blobs(random_blobs); cache.merge_block(block_commitments); cache.merge_blobs(blobs); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } #[test] fn invalid_blobs_valid_blobs_block() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + 
let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_blobs(random_blobs); cache.merge_blobs(blobs); cache.merge_block(block_commitments); @@ -1290,46 +1315,46 @@ mod pending_components_tests { #[test] fn block_valid_blobs_invalid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_block(block_commitments); cache.merge_blobs(blobs); cache.merge_blobs(random_blobs); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } #[test] fn valid_blobs_block_invalid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_blobs(blobs); cache.merge_block(block_commitments); cache.merge_blobs(random_blobs); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } #[test] fn valid_blobs_invalid_blobs_block() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = 
>::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_blobs(blobs); cache.merge_blobs(random_blobs); cache.merge_block(block_commitments); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } } diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs index f740b693fbf..f1646072c96 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -21,8 +21,8 @@ use std::sync::Arc; use tokio::sync::mpsc::Receiver; use types::blob_sidecar::{BlobSidecarError, FixedBlobSidecarList}; use types::{ - BeaconStateError, BlobSidecar, DataColumnSidecar, DataColumnSidecarList, EthSpec, FullPayload, - Hash256, SignedBeaconBlock, SignedBeaconBlockHeader, + BeaconStateError, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnSidecarList, EthSpec, + FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockHeader, }; pub enum BlobsOrDataColumns { @@ -112,6 +112,7 @@ pub async fn fetch_and_process_engine_blobs( response, signed_block_header, &kzg_commitments_proof, + &chain.spec, )?; let num_fetched_blobs = fixed_blob_sidecar_list @@ -275,8 +276,11 @@ fn build_blob_sidecars( response: Vec>>, signed_block_header: SignedBeaconBlockHeader, kzg_commitments_inclusion_proof: &FixedVector, + spec: &ChainSpec, ) -> Result, FetchEngineBlobError> { - let mut fixed_blob_sidecar_list = FixedBlobSidecarList::default(); + let epoch = block.epoch(); + let mut fixed_blob_sidecar_list = + FixedBlobSidecarList::default(spec.max_blobs_per_block(epoch) as usize); for (index, blob_and_proof) in response .into_iter() .enumerate() diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index bd47e82215e..e32ee9c24bd 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -194,9 +194,11 @@ fn build_data_column_sidecars( spec: &ChainSpec, ) -> Result, String> { let 
number_of_columns = spec.number_of_columns; - let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - let mut column_kzg_proofs = - vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + let max_blobs_per_block = spec + .max_blobs_per_block(signed_block_header.message.slot.epoch(E::slots_per_epoch())) + as usize; + let mut columns = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns]; + let mut column_kzg_proofs = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns]; for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { // we iterate over each column, and we construct the column from "top to bottom", @@ -253,6 +255,7 @@ pub fn reconstruct_blobs( data_columns: &[Arc>], blob_indices_opt: Option>, signed_block: &SignedBlindedBeaconBlock, + spec: &ChainSpec, ) -> Result, String> { // The data columns are from the database, so we assume their correctness. let first_data_column = data_columns @@ -315,10 +318,11 @@ pub fn reconstruct_blobs( .map(Arc::new) .map_err(|e| format!("{e:?}")) }) - .collect::, _>>()? - .into(); + .collect::, _>>()?; + + let max_blobs = spec.max_blobs_per_block(signed_block.epoch()) as usize; - Ok(blob_sidecars) + BlobSidecarList::new(blob_sidecars, max_blobs).map_err(|e| format!("{e:?}")) } /// Reconstruct all data columns from a subset of data column sidecars (requires at least 50%). 
@@ -478,6 +482,7 @@ mod test { &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], Some(blob_indices.clone()), &signed_blinded_block, + spec, ) .unwrap(); diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs index a9f46640645..48989e07d3d 100644 --- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -24,7 +24,7 @@ pub trait ObservableDataSidecar { fn slot(&self) -> Slot; fn block_proposer_index(&self) -> u64; fn index(&self) -> u64; - fn max_num_of_items(spec: &ChainSpec) -> usize; + fn max_num_of_items(spec: &ChainSpec, slot: Slot) -> usize; } impl ObservableDataSidecar for BlobSidecar { @@ -40,8 +40,8 @@ impl ObservableDataSidecar for BlobSidecar { self.index } - fn max_num_of_items(_spec: &ChainSpec) -> usize { - E::max_blobs_per_block() + fn max_num_of_items(spec: &ChainSpec, slot: Slot) -> usize { + spec.max_blobs_per_block(slot.epoch(E::slots_per_epoch())) as usize } } @@ -58,7 +58,7 @@ impl ObservableDataSidecar for DataColumnSidecar { self.index } - fn max_num_of_items(spec: &ChainSpec) -> usize { + fn max_num_of_items(spec: &ChainSpec, _slot: Slot) -> usize { spec.number_of_columns } } @@ -103,7 +103,9 @@ impl ObservedDataSidecars { slot: data_sidecar.slot(), proposer: data_sidecar.block_proposer_index(), }) - .or_insert_with(|| HashSet::with_capacity(T::max_num_of_items(&self.spec))); + .or_insert_with(|| { + HashSet::with_capacity(T::max_num_of_items(&self.spec, data_sidecar.slot())) + }); let did_not_exist = data_indices.insert(data_sidecar.index()); Ok(!did_not_exist) @@ -123,7 +125,7 @@ impl ObservedDataSidecars { } fn sanitize_data_sidecar(&self, data_sidecar: &T) -> Result<(), Error> { - if data_sidecar.index() >= T::max_num_of_items(&self.spec) as u64 { + if data_sidecar.index() >= T::max_num_of_items(&self.spec, data_sidecar.slot()) as u64 { return 
Err(Error::InvalidDataIndex(data_sidecar.index())); } let finalized_slot = self.finalized_slot; @@ -179,7 +181,7 @@ mod tests { use crate::test_utils::test_spec; use bls::Hash256; use std::sync::Arc; - use types::MainnetEthSpec; + use types::{Epoch, MainnetEthSpec}; type E = MainnetEthSpec; @@ -333,7 +335,7 @@ mod tests { #[test] fn simple_observations() { let spec = Arc::new(test_spec::()); - let mut cache = ObservedDataSidecars::>::new(spec); + let mut cache = ObservedDataSidecars::>::new(spec.clone()); // Slot 0, index 0 let proposer_index_a = 420; @@ -489,7 +491,7 @@ mod tests { ); // Try adding an out of bounds index - let invalid_index = E::max_blobs_per_block() as u64; + let invalid_index = spec.max_blobs_per_block(Epoch::new(0)); let sidecar_d = get_blob_sidecar(0, proposer_index_a, invalid_index); assert_eq!( cache.observe_sidecar(&sidecar_d), diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d37398e4e02..fd3cc496260 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -514,7 +514,7 @@ where pub fn mock_execution_layer_with_config(mut self) -> Self { let mock = mock_execution_layer_from_parts::( - self.spec.as_ref().expect("cannot build without spec"), + self.spec.clone().expect("cannot build without spec"), self.runtime.task_executor.clone(), ); self.execution_layer = Some(mock.el.clone()); @@ -614,7 +614,7 @@ where } pub fn mock_execution_layer_from_parts( - spec: &ChainSpec, + spec: Arc, task_executor: TaskExecutor, ) -> MockExecutionLayer { let shanghai_time = spec.capella_fork_epoch.map(|epoch| { @@ -630,7 +630,7 @@ pub fn mock_execution_layer_from_parts( HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() }); - let kzg = get_kzg(spec); + let kzg = get_kzg(&spec); MockExecutionLayer::new( task_executor, @@ -640,7 +640,7 @@ pub fn mock_execution_layer_from_parts( prague_time, osaka_time, 
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), - spec.clone(), + spec, Some(kzg), ) } @@ -749,15 +749,15 @@ where pub fn get_head_block(&self) -> RpcBlock { let block = self.chain.head_beacon_block(); let block_root = block.canonical_root(); - let blobs = self.chain.get_blobs(&block_root).unwrap(); - RpcBlock::new(Some(block_root), block, Some(blobs)).unwrap() + let blobs = self.chain.get_blobs(&block_root).unwrap().blobs(); + RpcBlock::new(Some(block_root), block, blobs).unwrap() } pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock { let block = self.chain.get_blinded_block(block_root).unwrap().unwrap(); let full_block = self.chain.store.make_full_block(block_root, block).unwrap(); - let blobs = self.chain.get_blobs(block_root).unwrap(); - RpcBlock::new(Some(*block_root), Arc::new(full_block), Some(blobs)).unwrap() + let blobs = self.chain.get_blobs(block_root).unwrap().blobs(); + RpcBlock::new(Some(*block_root), Arc::new(full_block), blobs).unwrap() } pub fn get_all_validators(&self) -> Vec { @@ -2020,7 +2020,7 @@ where let (block, blob_items) = block_contents; let sidecars = blob_items - .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs)) + .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec)) .transpose() .unwrap(); let block_hash: SignedBeaconBlockHash = self @@ -2046,7 +2046,7 @@ where let (block, blob_items) = block_contents; let sidecars = blob_items - .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs)) + .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec)) .transpose() .unwrap(); let block_root = block.canonical_root(); @@ -2817,11 +2817,12 @@ pub fn generate_rand_block_and_blobs( fork_name: ForkName, num_blobs: NumBlobs, rng: &mut impl Rng, + spec: &ChainSpec, ) -> (SignedBeaconBlock>, Vec>) { let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); let mut block = SignedBeaconBlock::from_block(inner, 
types::Signature::random_for_test(rng)); - + let max_blobs = spec.max_blobs_per_block(block.epoch()) as usize; let mut blob_sidecars = vec![]; let bundle = match block { @@ -2831,7 +2832,7 @@ pub fn generate_rand_block_and_blobs( // Get either zero blobs or a random number of blobs between 1 and Max Blobs. let payload: &mut FullPayloadDeneb = &mut message.body.execution_payload; let num_blobs = match num_blobs { - NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()), + NumBlobs::Random => rng.gen_range(1..=max_blobs), NumBlobs::Number(n) => n, NumBlobs::None => 0, }; @@ -2851,7 +2852,7 @@ pub fn generate_rand_block_and_blobs( // Get either zero blobs or a random number of blobs between 1 and Max Blobs. let payload: &mut FullPayloadElectra = &mut message.body.execution_payload; let num_blobs = match num_blobs { - NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()), + NumBlobs::Random => rng.gen_range(1..=max_blobs), NumBlobs::Number(n) => n, NumBlobs::None => 0, }; @@ -2870,7 +2871,7 @@ pub fn generate_rand_block_and_blobs( // Get either zero blobs or a random number of blobs between 1 and Max Blobs. 
let payload: &mut FullPayloadFulu = &mut message.body.execution_payload; let num_blobs = match num_blobs { - NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()), + NumBlobs::Random => rng.gen_range(1..=max_blobs), NumBlobs::Number(n) => n, NumBlobs::None => 0, }; @@ -2924,7 +2925,7 @@ pub fn generate_rand_block_and_data_columns( DataColumnSidecarList, ) { let kzg = get_kzg(spec); - let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng); + let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec); let blob_refs = blobs.iter().map(|b| &b.blob).collect::>(); let data_columns = blobs_to_data_column_sidecars(&blob_refs, &block, &kzg, spec).unwrap(); diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 87fefe71146..60001159938 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -155,7 +155,7 @@ async fn produces_attestations() { .store .make_full_block(&block_root, blinded_block) .unwrap(); - let blobs = chain.get_blobs(&block_root).unwrap(); + let blobs = chain.get_blobs(&block_root).unwrap().blobs(); let epoch_boundary_slot = state .current_epoch() @@ -223,7 +223,7 @@ async fn produces_attestations() { assert_eq!(data.target.root, target_root, "bad target root"); let rpc_block = - RpcBlock::::new(None, Arc::new(block.clone()), Some(blobs.clone())) + RpcBlock::::new(None, Arc::new(block.clone()), blobs.clone()) .unwrap(); let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available( available_block, @@ -299,10 +299,11 @@ async fn early_attester_cache_old_request() { let head_blobs = harness .chain .get_blobs(&head.beacon_block_root) - .expect("should get blobs"); + .expect("should get blobs") + .blobs(); let rpc_block = - RpcBlock::::new(None, head.beacon_block.clone(), Some(head_blobs)).unwrap(); + RpcBlock::::new(None, 
head.beacon_block.clone(), head_blobs).unwrap(); let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) = harness .chain diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 103734b2247..b61f758cac5 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -65,12 +65,13 @@ async fn get_chain_segment() -> (Vec>, Vec( signed_block: &SignedBeaconBlock, blobs: &mut BlobSidecarList, ) { - for old_blob_sidecar in blobs.iter_mut() { + for old_blob_sidecar in blobs.as_mut_slice() { let new_blob = Arc::new(BlobSidecar:: { index: old_blob_sidecar.index, blob: old_blob_sidecar.blob.clone(), @@ -1223,7 +1225,7 @@ async fn verify_block_for_gossip_slashing_detection() { let slasher = Arc::new( Slasher::open( SlasherConfig::new(slasher_dir.path().into()), - spec, + spec.clone(), test_logger(), ) .unwrap(), @@ -1247,7 +1249,7 @@ async fn verify_block_for_gossip_slashing_detection() { if let Some((kzg_proofs, blobs)) = blobs1 { let sidecars = - BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs).unwrap(); + BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs, &spec).unwrap(); for sidecar in sidecars { let blob_index = sidecar.index; let verified_blob = harness diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index ab784d3be45..c9bd55e0620 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -73,7 +73,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { let blob_1 = Arc::new(blob_1); let blob_2 = Arc::new(blob_2); - let blobs = FixedBlobSidecarList::from(vec![Some(blob_1.clone()), Some(blob_2.clone())]); + let blobs = FixedBlobSidecarList::new(vec![Some(blob_1.clone()), Some(blob_2.clone())]); let expected_sse_blobs = vec![ 
SseBlobSidecar::from_blob_sidecar(blob_1.as_ref()), SseBlobSidecar::from_blob_sidecar(blob_2.as_ref()), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index ed97b8d6345..60d46e8269d 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2317,7 +2317,12 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .get_full_block(&wss_block_root) .unwrap() .unwrap(); - let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); + let wss_blobs_opt = harness + .chain + .store + .get_blobs(&wss_block_root) + .unwrap() + .blobs(); let wss_state = full_store .get_state(&wss_state_root, Some(checkpoint_slot)) .unwrap() @@ -2342,8 +2347,10 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { let kzg = get_kzg(&spec); - let mock = - mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); + let mock = mock_execution_layer_from_parts( + harness.spec.clone(), + harness.runtime.task_executor.clone(), + ); // Initialise a new beacon chain from the finalized checkpoint. // The slot clock must be set to a time ahead of the checkpoint state. 
@@ -2388,7 +2395,11 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .unwrap() .unwrap(); - let store_wss_blobs_opt = beacon_chain.store.get_blobs(&wss_block_root).unwrap(); + let store_wss_blobs_opt = beacon_chain + .store + .get_blobs(&wss_block_root) + .unwrap() + .blobs(); assert_eq!(store_wss_block, wss_block); assert_eq!(store_wss_blobs_opt, wss_blobs_opt); @@ -2407,7 +2418,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .unwrap() .unwrap(); - let blobs = harness.chain.get_blobs(&block_root).expect("blobs"); + let blobs = harness.chain.get_blobs(&block_root).expect("blobs").blobs(); let slot = full_block.slot(); let state_root = full_block.state_root(); @@ -2415,7 +2426,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { beacon_chain .process_block( full_block.canonical_root(), - RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), + RpcBlock::new(Some(block_root), Arc::new(full_block), blobs).unwrap(), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -2469,13 +2480,13 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .expect("should get block") .expect("should get block"); - let blobs = harness.chain.get_blobs(&block_root).expect("blobs"); + let blobs = harness.chain.get_blobs(&block_root).expect("blobs").blobs(); if let MaybeAvailableBlock::Available(block) = harness .chain .data_availability_checker .verify_kzg_for_rpc_block( - RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), + RpcBlock::new(Some(block_root), Arc::new(full_block), blobs).unwrap(), ) .expect("should verify kzg") { @@ -3351,7 +3362,7 @@ fn check_blob_existence( .unwrap() .map(Result::unwrap) { - if let Some(blobs) = harness.chain.store.get_blobs(&block_root).unwrap() { + if let Some(blobs) = harness.chain.store.get_blobs(&block_root).unwrap().blobs() { assert!(should_exist, "blobs at slot 
{slot} exist but should not"); blobs_seen += blobs.len(); } else { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 24c66158225..1cd9e89b96c 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -36,7 +36,6 @@ use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn, Logger}; -use ssz::Decode; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -361,10 +360,11 @@ where let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; let anchor_blobs = if anchor_block.message().body().has_blobs() { + let max_blobs_len = spec.max_blobs_per_block(anchor_block.epoch()) as usize; let anchor_blobs_bytes = anchor_blobs_bytes .ok_or("Blobs for checkpoint must be provided using --checkpoint-blobs")?; Some( - BlobSidecarList::from_ssz_bytes(&anchor_blobs_bytes) + BlobSidecarList::from_ssz_bytes(&anchor_blobs_bytes, max_blobs_len) .map_err(|e| format!("Unable to parse weak subj blobs SSZ: {e:?}"))?, ) } else { diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 1fd9f81d46f..daf2bf6ed4b 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -1383,7 +1383,8 @@ mod test { impl Tester { pub fn new(with_auth: bool) -> Self { - let server = MockServer::unit_testing(); + let spec = Arc::new(MainnetEthSpec::default_spec()); + let server = MockServer::unit_testing(spec); let rpc_url = SensitiveUrl::parse(&server.url()).unwrap(); let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap(); diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs 
index 2a39796707b..9fa375b3757 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -154,6 +154,7 @@ pub struct ExecutionBlockGenerator { pub blobs_bundles: HashMap>, pub kzg: Option>, rng: Arc>, + spec: Arc, } fn make_rng() -> Arc> { @@ -172,6 +173,7 @@ impl ExecutionBlockGenerator { cancun_time: Option, prague_time: Option, osaka_time: Option, + spec: Arc, kzg: Option>, ) -> Self { let mut gen = Self { @@ -192,6 +194,7 @@ impl ExecutionBlockGenerator { blobs_bundles: <_>::default(), kzg, rng: make_rng(), + spec, }; gen.insert_pow_block(0).unwrap(); @@ -697,7 +700,11 @@ impl ExecutionBlockGenerator { if execution_payload.fork_name().deneb_enabled() { // get random number between 0 and Max Blobs let mut rng = self.rng.lock(); - let num_blobs = rng.gen::() % (E::max_blobs_per_block() + 1); + let max_blobs = self + .spec + .max_blobs_per_block_by_fork(execution_payload.fork_name()) + as usize; + let num_blobs = rng.gen::() % (max_blobs + 1); let (bundle, transactions) = generate_blobs(num_blobs)?; for tx in Vec::from(transactions) { execution_payload @@ -906,6 +913,7 @@ mod test { const TERMINAL_DIFFICULTY: u64 = 10; const TERMINAL_BLOCK: u64 = 10; const DIFFICULTY_INCREMENT: u64 = 1; + let spec = Arc::new(MainnetEthSpec::default_spec()); let mut generator: ExecutionBlockGenerator = ExecutionBlockGenerator::new( Uint256::from(TERMINAL_DIFFICULTY), @@ -915,6 +923,7 @@ mod test { None, None, None, + spec, None, ); diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 9df8d9cc5cf..f45bfda9ffa 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -13,7 +13,7 @@ pub struct MockExecutionLayer { pub server: MockServer, pub el: ExecutionLayer, pub executor: 
TaskExecutor, - pub spec: ChainSpec, + pub spec: Arc, } impl MockExecutionLayer { @@ -30,7 +30,7 @@ impl MockExecutionLayer { None, None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), - spec, + Arc::new(spec), None, ) } @@ -44,7 +44,7 @@ impl MockExecutionLayer { prague_time: Option, osaka_time: Option, jwt_key: Option, - spec: ChainSpec, + spec: Arc, kzg: Option>, ) -> Self { let handle = executor.handle().unwrap(); @@ -60,6 +60,7 @@ impl MockExecutionLayer { cancun_time, prague_time, osaka_time, + spec.clone(), kzg, ); @@ -323,7 +324,7 @@ impl MockExecutionLayer { pub async fn with_terminal_block(self, func: U) -> Self where - U: Fn(ChainSpec, ExecutionLayer, Option) -> V, + U: Fn(Arc, ExecutionLayer, Option) -> V, V: Future, { let terminal_block_number = self diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 5934c069a2f..75ff4358865 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -21,7 +21,7 @@ use std::marker::PhantomData; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::{Arc, LazyLock}; use tokio::{runtime, sync::oneshot}; -use types::{EthSpec, ExecutionBlockHash, Uint256}; +use types::{ChainSpec, EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; use crate::EngineCapabilities; @@ -111,7 +111,7 @@ pub struct MockServer { } impl MockServer { - pub fn unit_testing() -> Self { + pub fn unit_testing(chain_spec: Arc) -> Self { Self::new( &runtime::Handle::current(), JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), @@ -122,6 +122,7 @@ impl MockServer { None, // FIXME(deneb): should this be the default? None, // FIXME(electra): should this be the default? None, // FIXME(fulu): should this be the default? 
+ chain_spec, None, ) } @@ -129,6 +130,7 @@ impl MockServer { pub fn new_with_config( handle: &runtime::Handle, config: MockExecutionConfig, + spec: Arc, kzg: Option>, ) -> Self { let MockExecutionConfig { @@ -152,6 +154,7 @@ impl MockServer { cancun_time, prague_time, osaka_time, + spec, kzg, ); @@ -216,6 +219,7 @@ impl MockServer { cancun_time: Option, prague_time: Option, osaka_time: Option, + spec: Arc, kzg: Option>, ) -> Self { Self::new_with_config( @@ -231,6 +235,7 @@ impl MockServer { prague_time, osaka_time, }, + spec, kzg, ) } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index b9e48833184..0b00958f26a 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -287,14 +287,16 @@ impl BlockId { })?; // Return the `BlobSidecarList` identified by `self`. + let max_blobs_per_block = chain.spec.max_blobs_per_block(block.epoch()) as usize; let blob_sidecar_list = if !blob_kzg_commitments.is_empty() { if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { Self::get_blobs_from_data_columns(chain, root, query.indices, &block)? } else { - Self::get_blobs(chain, root, query.indices)? + Self::get_blobs(chain, root, query.indices, max_blobs_per_block)? } } else { - BlobSidecarList::default() + BlobSidecarList::new(vec![], max_blobs_per_block) + .map_err(|e| warp_utils::reject::custom_server_error(format!("{:?}", e)))? }; Ok((block, blob_sidecar_list, execution_optimistic, finalized)) @@ -304,22 +306,25 @@ impl BlockId { chain: &BeaconChain, root: Hash256, indices: Option>, + max_blobs_per_block: usize, ) -> Result, Rejection> { let blob_sidecar_list = chain .store .get_blobs(&root) .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? 
+ .blobs() .ok_or_else(|| { warp_utils::reject::custom_not_found(format!("no blobs stored for block {root}")) })?; let blob_sidecar_list_filtered = match indices { Some(vec) => { - let list = blob_sidecar_list + let list: Vec<_> = blob_sidecar_list .into_iter() .filter(|blob_sidecar| vec.contains(&blob_sidecar.index)) .collect(); - BlobSidecarList::new(list) + + BlobSidecarList::new(list, max_blobs_per_block) .map_err(|e| warp_utils::reject::custom_server_error(format!("{:?}", e)))? } None => blob_sidecar_list, @@ -356,11 +361,13 @@ impl BlockId { ) .collect::, _>>()?; - reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block).map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "Error reconstructing data columns: {e:?}" - )) - }) + reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block, &chain.spec).map_err( + |e| { + warp_utils::reject::custom_server_error(format!( + "Error reconstructing data columns: {e:?}" + )) + }, + ) } else { Err(warp_utils::reject::custom_server_error( format!("Insufficient data columns to reconstruct blobs: required {num_required_columns}, but only {num_found_column_keys} were found.") diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 8e0a51a32a2..db4ef002579 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1460,7 +1460,8 @@ pub async fn block_seen_on_gossip_with_some_blobs() { let blobs = blobs.expect("should have some blobs"); assert!( blobs.0.len() >= 2, - "need at least 2 blobs for partial reveal" + "need at least 2 blobs for partial reveal, got: {}", + blobs.0.len() ); let partial_kzg_proofs = vec![*blobs.0.first().unwrap()]; diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index c3d20bbfb17..61b2699ac5a 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -186,6 +186,7 @@ impl Decoder for SSZSnappyInboundCodec { handle_rpc_request( self.protocol.versioned_protocol, &decoded_buffer, + self.fork_context.current_fork(), &self.fork_context.spec, ) } @@ -555,6 +556,7 @@ fn handle_length( fn handle_rpc_request( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], + current_fork: ForkName, spec: &ChainSpec, ) -> Result>, RPCError> { match versioned_protocol { @@ -586,9 +588,23 @@ fn handle_rpc_request( )?, }), ))), - SupportedProtocol::BlobsByRangeV1 => Ok(Some(RequestType::BlobsByRange( - BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, - ))), + SupportedProtocol::BlobsByRangeV1 => { + let req = BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?; + let max_requested_blobs = req + .count + .saturating_mul(spec.max_blobs_per_block_by_fork(current_fork)); + // TODO(pawan): change this to max_blobs_per_rpc_request in the alpha10 PR + if max_requested_blobs > spec.max_request_blob_sidecars { + return Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + format!( + "requested exceeded limit. 
allowed: {}, requested: {}", + spec.max_request_blob_sidecars, max_requested_blobs + ), + )); + } + Ok(Some(RequestType::BlobsByRange(req))) + } SupportedProtocol::BlobsByRootV1 => { Ok(Some(RequestType::BlobsByRoot(BlobsByRootRequest { blob_ids: RuntimeVariableList::from_ssz_bytes( diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index 7b3a59eac7e..75d49e9cb5f 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -110,8 +110,8 @@ impl RateLimiterConfig { pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(128, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); // `DEFAULT_BLOCKS_BY_RANGE_QUOTA` * (target + 1) to account for high usage - pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(512, 10); - pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(512, 10); + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(896, 10); + pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(896, 10); // 320 blocks worth of columns for regular node, or 40 blocks for supernode. // Range sync load balances when requesting blocks, and each batch is 32 blocks. pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = Quota::n_every(5120, 10); diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 0a0a6ca754f..3a008df023d 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -855,7 +855,8 @@ where } let (req, substream) = substream; - let max_responses = req.max_responses(); + let max_responses = + req.max_responses(self.fork_context.current_fork(), &self.fork_context.spec); // store requests that expect responses if max_responses > 0 { @@ -924,7 +925,8 @@ where } // add the stream to substreams if we expect a response, otherwise drop the stream. 
- let max_responses = request.max_responses(); + let max_responses = + request.max_responses(self.fork_context.current_fork(), &self.fork_context.spec); if max_responses > 0 { let max_remaining_chunks = if request.expect_exactly_one_response() { // Currently enforced only for multiple responses diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index bb8bfb0e206..500188beefb 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -15,6 +15,7 @@ use strum::IntoStaticStr; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; +use types::ForkName; use types::{ blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate, @@ -327,8 +328,9 @@ pub struct BlobsByRangeRequest { } impl BlobsByRangeRequest { - pub fn max_blobs_requested(&self) -> u64 { - self.count.saturating_mul(E::max_blobs_per_block() as u64) + pub fn max_blobs_requested(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { + let max_blobs_per_block = spec.max_blobs_per_block_by_fork(current_fork); + self.count.saturating_mul(max_blobs_per_block) } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 7d091da7660..03f1395b8b5 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -181,12 +181,13 @@ impl RPC { let inbound_limiter = inbound_rate_limiter_config.map(|config| { debug!(log, "Using inbound rate limiting params"; "config" => ?config); - RateLimiter::new_with_config(config.0) + RateLimiter::new_with_config(config.0, fork_context.clone()) .expect("Inbound limiter configuration parameters are valid") }); let self_limiter = outbound_rate_limiter_config.map(|config| { 
- SelfRateLimiter::new(config, log.clone()).expect("Configuration parameters are valid") + SelfRateLimiter::new(config, fork_context.clone(), log.clone()) + .expect("Configuration parameters are valid") }); RPC { diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 87bde58292b..681b739d598 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -86,6 +86,10 @@ pub static SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD: LazyLock = LazyL /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network /// with `max_chunk_size`. +/// +/// FIXME: Given that these limits are useless we should probably delete them. See: +/// +/// https://github.com/sigp/lighthouse/issues/6790 pub static SIGNED_BEACON_BLOCK_BELLATRIX_MAX: LazyLock = LazyLock::new(|| // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX @@ -102,7 +106,6 @@ pub static SIGNED_BEACON_BLOCK_DENEB_MAX: LazyLock = LazyLock::new(|| { *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_deneb_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload` - + (::ssz_fixed_len() * ::max_blobs_per_block()) + ssz::BYTES_PER_LENGTH_OFFSET }); // Length offset for the blob commitments field. 
// @@ -110,7 +113,6 @@ pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX: LazyLock = LazyLock::new(|| { *SIGNED_BEACON_BLOCK_ELECTRA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_electra_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional ssz offset for the `ExecutionPayload` field - + (::ssz_fixed_len() * ::max_blobs_per_block()) + ssz::BYTES_PER_LENGTH_OFFSET }); // Length offset for the blob commitments field. @@ -118,8 +120,6 @@ pub static SIGNED_BEACON_BLOCK_FULU_MAX: LazyLock = LazyLock::new(|| { *SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_fulu_size() + ssz::BYTES_PER_LENGTH_OFFSET - + (::ssz_fixed_len() - * ::max_blobs_per_block()) + ssz::BYTES_PER_LENGTH_OFFSET }); @@ -129,14 +129,6 @@ pub static BLOB_SIDECAR_SIZE: LazyLock = pub static BLOB_SIDECAR_SIZE_MINIMAL: LazyLock = LazyLock::new(BlobSidecar::::max_size); -pub static DATA_COLUMNS_SIDECAR_MIN: LazyLock = LazyLock::new(|| { - DataColumnSidecar::::empty() - .as_ssz_bytes() - .len() -}); -pub static DATA_COLUMNS_SIDECAR_MAX: LazyLock = - LazyLock::new(DataColumnSidecar::::max_size); - pub static ERROR_TYPE_MIN: LazyLock = LazyLock::new(|| { VariableList::::from(Vec::::new()) .as_ssz_bytes() @@ -635,8 +627,10 @@ impl ProtocolId { Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlobsByRange => rpc_blob_limits::(), Protocol::BlobsByRoot => rpc_blob_limits::(), - Protocol::DataColumnsByRoot => rpc_data_column_limits(), - Protocol::DataColumnsByRange => rpc_data_column_limits(), + Protocol::DataColumnsByRoot => rpc_data_column_limits::(fork_context.current_fork()), + Protocol::DataColumnsByRange => { + rpc_data_column_limits::(fork_context.current_fork()) + } Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -716,8 +710,14 @@ pub fn rpc_blob_limits() -> RpcLimits { } } -pub fn rpc_data_column_limits() -> RpcLimits 
{ - RpcLimits::new(*DATA_COLUMNS_SIDECAR_MIN, *DATA_COLUMNS_SIDECAR_MAX) +// TODO(peerdas): fix hardcoded max here +pub fn rpc_data_column_limits(fork_name: ForkName) -> RpcLimits { + RpcLimits::new( + DataColumnSidecar::::empty().as_ssz_bytes().len(), + DataColumnSidecar::::max_size( + E::default_spec().max_blobs_per_block_by_fork(fork_name) as usize + ), + ) } /* Inbound upgrade */ @@ -815,13 +815,13 @@ impl RequestType { /* These functions are used in the handler for stream management */ /// Maximum number of responses expected for this request. - pub fn max_responses(&self) -> u64 { + pub fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { match self { RequestType::Status(_) => 1, RequestType::Goodbye(_) => 0, RequestType::BlocksByRange(req) => *req.count(), RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, - RequestType::BlobsByRange(req) => req.max_blobs_requested::(), + RequestType::BlobsByRange(req) => req.max_blobs_requested(current_fork, spec), RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, RequestType::DataColumnsByRange(req) => req.max_requested::(), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index ecbacc8c112..b9e82a5f1ee 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -6,10 +6,11 @@ use serde::{Deserialize, Serialize}; use std::future::Future; use std::hash::Hash; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use std::time::{Duration, Instant}; use tokio::time::Interval; -use types::EthSpec; +use types::{ChainSpec, EthSpec, ForkContext, ForkName}; /// Nanoseconds since a given time. 
// Maintained as u64 to reduce footprint @@ -109,6 +110,7 @@ pub struct RPCRateLimiter { lc_finality_update_rl: Limiter, /// LightClientUpdatesByRange rate limiter. lc_updates_by_range_rl: Limiter, + fork_context: Arc, } /// Error type for non conformant requests @@ -176,7 +178,7 @@ impl RPCRateLimiterBuilder { self } - pub fn build(self) -> Result { + pub fn build(self, fork_context: Arc) -> Result { // get our quotas let ping_quota = self.ping_quota.ok_or("Ping quota not specified")?; let metadata_quota = self.metadata_quota.ok_or("MetaData quota not specified")?; @@ -253,13 +255,14 @@ impl RPCRateLimiterBuilder { lc_finality_update_rl, lc_updates_by_range_rl, init_time: Instant::now(), + fork_context, }) } } pub trait RateLimiterItem { fn protocol(&self) -> Protocol; - fn max_responses(&self) -> u64; + fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64; } impl RateLimiterItem for super::RequestType { @@ -267,13 +270,16 @@ impl RateLimiterItem for super::RequestType { self.versioned_protocol().protocol() } - fn max_responses(&self) -> u64 { - self.max_responses() + fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { + self.max_responses(current_fork, spec) } } impl RPCRateLimiter { - pub fn new_with_config(config: RateLimiterConfig) -> Result { + pub fn new_with_config( + config: RateLimiterConfig, + fork_context: Arc, + ) -> Result { // Destructure to make sure every configuration value is used. let RateLimiterConfig { ping_quota, @@ -316,7 +322,7 @@ impl RPCRateLimiter { Protocol::LightClientUpdatesByRange, light_client_updates_by_range_quota, ) - .build() + .build(fork_context) } /// Get a builder instance. 
@@ -330,7 +336,9 @@ impl RPCRateLimiter { request: &Item, ) -> Result<(), RateLimitedErr> { let time_since_start = self.init_time.elapsed(); - let tokens = request.max_responses().max(1); + let tokens = request + .max_responses(self.fork_context.current_fork(), &self.fork_context.spec) + .max(1); let check = |limiter: &mut Limiter| limiter.allows(time_since_start, peer_id, tokens); diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index e968ad11e3d..e0c8593f29f 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -1,5 +1,6 @@ use std::{ collections::{hash_map::Entry, HashMap, VecDeque}, + sync::Arc, task::{Context, Poll}, time::Duration, }; @@ -9,7 +10,7 @@ use libp2p::{swarm::NotifyHandler, PeerId}; use slog::{crit, debug, Logger}; use smallvec::SmallVec; use tokio_util::time::DelayQueue; -use types::EthSpec; +use types::{EthSpec, ForkContext}; use super::{ config::OutboundRateLimiterConfig, @@ -50,9 +51,13 @@ pub enum Error { impl SelfRateLimiter { /// Creates a new [`SelfRateLimiter`] based on configration values. - pub fn new(config: OutboundRateLimiterConfig, log: Logger) -> Result { + pub fn new( + config: OutboundRateLimiterConfig, + fork_context: Arc, + log: Logger, + ) -> Result { debug!(log, "Using self rate limiting params"; "config" => ?config); - let limiter = RateLimiter::new_with_config(config.0)?; + let limiter = RateLimiter::new_with_config(config.0, fork_context)?; Ok(SelfRateLimiter { delayed_requests: Default::default(), @@ -215,7 +220,7 @@ mod tests { use crate::service::api_types::{AppRequestId, RequestId, SyncRequestId}; use libp2p::PeerId; use std::time::Duration; - use types::MainnetEthSpec; + use types::{EthSpec, ForkContext, Hash256, MainnetEthSpec, Slot}; /// Test that `next_peer_request_ready` correctly maintains the queue. 
#[tokio::test] @@ -225,8 +230,13 @@ mod tests { ping_quota: Quota::n_every(1, 2), ..Default::default() }); + let fork_context = std::sync::Arc::new(ForkContext::new::( + Slot::new(0), + Hash256::ZERO, + &MainnetEthSpec::default_spec(), + )); let mut limiter: SelfRateLimiter = - SelfRateLimiter::new(config, log).unwrap(); + SelfRateLimiter::new(config, fork_context, log).unwrap(); let peer_id = PeerId::random(); for i in 1..=5u32 { diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index c4944078fef..b4f19f668d7 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -890,14 +890,6 @@ impl NetworkBeaconProcessor { "start_slot" => req.start_slot, ); - // Should not send more than max request blocks - if req.max_blobs_requested::() > self.chain.spec.max_request_blob_sidecars { - return Err(( - RpcErrorResponse::InvalidRequest, - "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", - )); - } - let request_start_slot = Slot::from(req.start_slot); let data_availability_boundary_slot = match self.chain.data_availability_boundary() { diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 7e27a91bd6b..8238fa146dd 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -259,7 +259,7 @@ impl TestRig { assert!(beacon_processor.is_ok()); let block = next_block_tuple.0; let blob_sidecars = if let Some((kzg_proofs, blobs)) = next_block_tuple.1 { - Some(BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap()) + Some(BlobSidecar::build_sidecars(blobs, &block, kzg_proofs, &chain.spec).unwrap()) } else { None }; @@ -344,7 +344,7 @@ impl TestRig { } pub fn enqueue_single_lookup_rpc_blobs(&self) { if let Some(blobs) = 
self.next_blobs.clone() { - let blobs = FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::>()); + let blobs = FixedBlobSidecarList::new(blobs.into_iter().map(Some).collect::>()); self.network_beacon_processor .send_rpc_blobs( self.next_block.canonical_root(), @@ -1130,7 +1130,12 @@ async fn test_blobs_by_range() { .block_root_at_slot(Slot::new(slot), WhenSlotSkipped::None) .unwrap(); blob_count += root - .map(|root| rig.chain.get_blobs(&root).unwrap_or_default().len()) + .map(|root| { + rig.chain + .get_blobs(&root) + .map(|list| list.len()) + .unwrap_or(0) + }) .unwrap_or(0); } let mut actual_count = 0; diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 966ce55fabe..7a234eaef04 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -2,13 +2,13 @@ use beacon_chain::{ block_verification_types::RpcBlock, data_column_verification::CustodyDataColumn, get_block_root, }; use lighthouse_network::PeerId; -use ssz_types::VariableList; use std::{ collections::{HashMap, VecDeque}, sync::Arc, }; use types::{ - BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, EthSpec, Hash256, SignedBeaconBlock, + BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, EthSpec, Hash256, RuntimeVariableList, + SignedBeaconBlock, }; #[derive(Debug)] @@ -31,6 +31,7 @@ pub struct RangeBlockComponentsRequest { num_custody_column_requests: Option, /// The peers the request was made to. 
pub(crate) peer_ids: Vec, + max_blobs_per_block: usize, } impl RangeBlockComponentsRequest { @@ -39,6 +40,7 @@ impl RangeBlockComponentsRequest { expects_custody_columns: Option>, num_custody_column_requests: Option, peer_ids: Vec, + max_blobs_per_block: usize, ) -> Self { Self { blocks: <_>::default(), @@ -51,6 +53,7 @@ impl RangeBlockComponentsRequest { expects_custody_columns, num_custody_column_requests, peer_ids, + max_blobs_per_block, } } @@ -100,7 +103,7 @@ impl RangeBlockComponentsRequest { let mut responses = Vec::with_capacity(blocks.len()); let mut blob_iter = blobs.into_iter().peekable(); for block in blocks.into_iter() { - let mut blob_list = Vec::with_capacity(E::max_blobs_per_block()); + let mut blob_list = Vec::with_capacity(self.max_blobs_per_block); while { let pair_next_blob = blob_iter .peek() @@ -111,7 +114,7 @@ impl RangeBlockComponentsRequest { blob_list.push(blob_iter.next().ok_or("Missing next blob".to_string())?); } - let mut blobs_buffer = vec![None; E::max_blobs_per_block()]; + let mut blobs_buffer = vec![None; self.max_blobs_per_block]; for blob in blob_list { let blob_index = blob.index as usize; let Some(blob_opt) = blobs_buffer.get_mut(blob_index) else { @@ -123,7 +126,11 @@ impl RangeBlockComponentsRequest { *blob_opt = Some(blob); } } - let blobs = VariableList::from(blobs_buffer.into_iter().flatten().collect::>()); + let blobs = RuntimeVariableList::new( + blobs_buffer.into_iter().flatten().collect::>(), + self.max_blobs_per_block, + ) + .map_err(|_| "Blobs returned exceeds max length".to_string())?; responses.push(RpcBlock::new(None, block, Some(blobs)).map_err(|e| format!("{e:?}"))?) 
} @@ -245,12 +252,18 @@ mod tests { #[test] fn no_blobs_into_responses() { + let spec = test_spec::(); let peer_id = PeerId::random(); - let mut info = RangeBlockComponentsRequest::::new(false, None, None, vec![peer_id]); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) - .map(|_| generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng).0) + .map(|_| { + generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng, &spec) + .0 + }) .collect::>(); + let max_len = spec.max_blobs_per_block(blocks.first().unwrap().epoch()) as usize; + let mut info = + RangeBlockComponentsRequest::::new(false, None, None, vec![peer_id], max_len); // Send blocks and complete terminate response for block in blocks { @@ -265,15 +278,24 @@ mod tests { #[test] fn empty_blobs_into_responses() { + let spec = test_spec::(); let peer_id = PeerId::random(); - let mut info = RangeBlockComponentsRequest::::new(true, None, None, vec![peer_id]); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { // Always generate some blobs. 
- generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Number(3), &mut rng).0 + generate_rand_block_and_blobs::( + ForkName::Deneb, + NumBlobs::Number(3), + &mut rng, + &spec, + ) + .0 }) .collect::>(); + let max_len = spec.max_blobs_per_block(blocks.first().unwrap().epoch()) as usize; + let mut info = + RangeBlockComponentsRequest::::new(true, None, None, vec![peer_id], max_len); // Send blocks and complete terminate response for block in blocks { @@ -294,12 +316,7 @@ mod tests { fn rpc_block_with_custody_columns() { let spec = test_spec::(); let expects_custody_columns = vec![1, 2, 3, 4]; - let mut info = RangeBlockComponentsRequest::::new( - false, - Some(expects_custody_columns.clone()), - Some(expects_custody_columns.len()), - vec![PeerId::random()], - ); + let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { @@ -311,7 +328,14 @@ mod tests { ) }) .collect::>(); - + let max_len = spec.max_blobs_per_block(blocks.first().unwrap().0.epoch()) as usize; + let mut info = RangeBlockComponentsRequest::::new( + false, + Some(expects_custody_columns.clone()), + Some(expects_custody_columns.len()), + vec![PeerId::random()], + max_len, + ); // Send blocks and complete terminate response for block in &blocks { info.add_block_response(Some(block.0.clone().into())); @@ -355,12 +379,7 @@ mod tests { let spec = test_spec::(); let expects_custody_columns = vec![1, 2, 3, 4]; let num_of_data_column_requests = 2; - let mut info = RangeBlockComponentsRequest::::new( - false, - Some(expects_custody_columns.clone()), - Some(num_of_data_column_requests), - vec![PeerId::random()], - ); + let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { @@ -372,7 +391,14 @@ mod tests { ) }) .collect::>(); - + let max_len = spec.max_blobs_per_block(blocks.first().unwrap().0.epoch()) as usize; + let mut info = RangeBlockComponentsRequest::::new( + false, + Some(expects_custody_columns.clone()), + Some(num_of_data_column_requests), + 
vec![PeerId::random()], + max_len, + ); // Send blocks and complete terminate response for block in &blocks { info.add_block_response(Some(block.0.clone().into())); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 5d02be2b4c1..2df8b5f94c0 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1234,6 +1234,7 @@ impl SyncManager { .network .range_block_and_blob_response(id, block_or_blob) { + let epoch = resp.sender_id.batch_id(); match resp.responses { Ok(blocks) => { match resp.sender_id { @@ -1277,6 +1278,7 @@ impl SyncManager { resp.expects_custody_columns, None, vec![], + self.chain.spec.max_blobs_per_block(epoch) as usize, ), ); // inform range that the request needs to be treated as failed diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index b6b7b315f3f..e1b2b974ec4 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -67,6 +67,15 @@ pub enum RangeRequestId { }, } +impl RangeRequestId { + pub fn batch_id(&self) -> BatchId { + match self { + RangeRequestId::RangeSync { batch_id, .. } => *batch_id, + RangeRequestId::BackfillSync { batch_id, .. 
} => *batch_id, + } + } +} + #[derive(Debug)] pub enum RpcEvent { StreamTermination, @@ -445,11 +454,14 @@ impl SyncNetworkContext { (None, None) }; + // TODO(pawan): this would break if a batch contains multiple epochs + let max_blobs_len = self.chain.spec.max_blobs_per_block(epoch); let info = RangeBlockComponentsRequest::new( expected_blobs, expects_columns, num_of_column_req, requested_peers, + max_blobs_len as usize, ); self.range_block_components_requests .insert(id, (sender_id, info)); @@ -950,12 +962,23 @@ impl SyncNetworkContext { ) -> Option>> { let response = self.blobs_by_root_requests.on_response(id, rpc_event); let response = response.map(|res| { - res.and_then( - |(blobs, seen_timestamp)| match to_fixed_blob_sidecar_list(blobs) { - Ok(blobs) => Ok((blobs, seen_timestamp)), - Err(e) => Err(e.into()), - }, - ) + res.and_then(|(blobs, seen_timestamp)| { + if let Some(max_len) = blobs + .first() + .map(|blob| self.chain.spec.max_blobs_per_block(blob.epoch()) as usize) + { + match to_fixed_blob_sidecar_list(blobs, max_len) { + Ok(blobs) => Ok((blobs, seen_timestamp)), + Err(e) => Err(e.into()), + } + } else { + Err(RpcResponseError::VerifyError( + LookupVerifyError::InternalError( + "Requested blobs for a block that has no blobs".to_string(), + ), + )) + } + }) }); if let Some(Err(RpcResponseError::VerifyError(e))) = &response { self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); @@ -1150,8 +1173,9 @@ impl SyncNetworkContext { fn to_fixed_blob_sidecar_list( blobs: Vec>>, + max_len: usize, ) -> Result, LookupVerifyError> { - let mut fixed_list = FixedBlobSidecarList::default(); + let mut fixed_list = FixedBlobSidecarList::new(vec![None; max_len]); for blob in blobs.into_iter() { let index = blob.index as usize; *fixed_list diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index b9214bafcd7..4a5a16459d3 100644 --- 
a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -28,6 +28,7 @@ pub enum LookupVerifyError { UnrequestedIndex(u64), InvalidInclusionProof, DuplicateData, + InternalError(String), } /// Collection of active requests of a single ReqResp method, i.e. `blocks_by_root` diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index a43b3bd022f..b9e38237c58 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -119,6 +119,8 @@ impl TestRig { .network_globals .set_sync_state(SyncState::Synced); + let spec = chain.spec.clone(); + let rng = XorShiftRng::from_seed([42; 16]); TestRig { beacon_processor_rx, @@ -142,6 +144,7 @@ impl TestRig { harness, fork_name, log, + spec, } } @@ -213,7 +216,7 @@ impl TestRig { ) -> (SignedBeaconBlock, Vec>) { let fork_name = self.fork_name; let rng = &mut self.rng; - generate_rand_block_and_blobs::(fork_name, num_blobs, rng) + generate_rand_block_and_blobs::(fork_name, num_blobs, rng, &self.spec) } fn rand_block_and_data_columns( @@ -1328,8 +1331,10 @@ impl TestRig { #[test] fn stable_rng() { + let spec = types::MainnetEthSpec::default_spec(); let mut rng = XorShiftRng::from_seed([42; 16]); - let (block, _) = generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng); + let (block, _) = + generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng, &spec); assert_eq!( block.canonical_root(), Hash256::from_slice( @@ -2187,8 +2192,8 @@ mod deneb_only { block_verification_types::{AsBlock, RpcBlock}, data_availability_checker::AvailabilityCheckError, }; - use ssz_types::VariableList; use std::collections::VecDeque; + use types::RuntimeVariableList; struct DenebTester { rig: TestRig, @@ -2546,12 +2551,15 @@ mod deneb_only { fn parent_block_unknown_parent(mut self) -> Self { self.rig.log("parent_block_unknown_parent"); let block = 
self.unknown_parent_block.take().unwrap(); + let max_len = self.rig.spec.max_blobs_per_block(block.epoch()) as usize; // Now this block is the one we expect requests from self.block = block.clone(); let block = RpcBlock::new( Some(block.canonical_root()), block, - self.unknown_parent_blobs.take().map(VariableList::from), + self.unknown_parent_blobs + .take() + .map(|vec| RuntimeVariableList::from_vec(vec, max_len)), ) .unwrap(); self.rig.parent_block_processed( diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index 47666b413c5..6ed5c7f8fab 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -12,7 +12,7 @@ use slot_clock::ManualSlotClock; use std::sync::Arc; use store::MemoryStore; use tokio::sync::mpsc; -use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E}; +use types::{test_utils::XorShiftRng, ChainSpec, ForkName, MinimalEthSpec as E}; mod lookups; mod range; @@ -64,4 +64,5 @@ struct TestRig { rng: XorShiftRng, fork_name: ForkName, log: Logger, + spec: Arc, } diff --git a/beacon_node/store/src/blob_sidecar_list_from_root.rs b/beacon_node/store/src/blob_sidecar_list_from_root.rs new file mode 100644 index 00000000000..de63eaa76ce --- /dev/null +++ b/beacon_node/store/src/blob_sidecar_list_from_root.rs @@ -0,0 +1,42 @@ +use std::sync::Arc; +use types::{BlobSidecar, BlobSidecarList, EthSpec}; + +#[derive(Debug, Clone)] +pub enum BlobSidecarListFromRoot { + /// Valid root that exists in the DB, but has no blobs associated with it. + NoBlobs, + /// Contains at least one blob for the requested root. + Blobs(BlobSidecarList), + /// No root exists in the db or cache for the requested root.
+ NoRoot, +} + +impl From> for BlobSidecarListFromRoot { + fn from(value: BlobSidecarList) -> Self { + Self::Blobs(value) + } +} + +impl BlobSidecarListFromRoot { + pub fn blobs(self) -> Option> { + match self { + Self::NoBlobs | Self::NoRoot => None, + Self::Blobs(blobs) => Some(blobs), + } + } + + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + match self { + Self::NoBlobs | Self::NoRoot => 0, + Self::Blobs(blobs) => blobs.len(), + } + } + + pub fn iter(&self) -> impl Iterator>> { + match self { + Self::NoBlobs | Self::NoRoot => [].iter(), + Self::Blobs(list) => list.iter(), + } + } +} diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c6148e53144..c29305f9831 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -14,8 +14,8 @@ use crate::metadata::{ }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ - get_data_column_key, get_key_for_col, DBColumn, DatabaseBlock, Error, ItemStore, - KeyValueStoreOp, StoreItem, StoreOp, + get_data_column_key, get_key_for_col, BlobSidecarListFromRoot, DBColumn, DatabaseBlock, Error, + ItemStore, KeyValueStoreOp, StoreItem, StoreOp, }; use crate::{metrics, parse_data_column_key}; use itertools::{process_results, Itertools}; @@ -1280,9 +1280,10 @@ impl, Cold: ItemStore> HotColdDB StoreOp::PutBlobs(_, _) | StoreOp::PutDataColumns(_, _) => true, StoreOp::DeleteBlobs(block_root) => { match self.get_blobs(block_root) { - Ok(Some(blob_sidecar_list)) => { + Ok(BlobSidecarListFromRoot::Blobs(blob_sidecar_list)) => { blobs_to_delete.push((*block_root, blob_sidecar_list)); } + Ok(BlobSidecarListFromRoot::NoBlobs | BlobSidecarListFromRoot::NoRoot) => {} Err(e) => { error!( self.log, "Error getting blobs"; @@ -1290,7 +1291,6 @@ impl, Cold: ItemStore> HotColdDB "error" => ?e ); } - _ => (), } true } @@ -2045,11 +2045,11 @@ impl, Cold: ItemStore> HotColdDB } /// Fetch blobs for a given block from the 
store. - pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { + pub fn get_blobs(&self, block_root: &Hash256) -> Result, Error> { // Check the cache. if let Some(blobs) = self.block_cache.lock().get_blobs(block_root) { metrics::inc_counter(&metrics::BEACON_BLOBS_CACHE_HIT_COUNT); - return Ok(Some(blobs.clone())); + return Ok(blobs.clone().into()); } match self @@ -2057,13 +2057,27 @@ impl, Cold: ItemStore> HotColdDB .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_slice())? { Some(ref blobs_bytes) => { - let blobs = BlobSidecarList::from_ssz_bytes(blobs_bytes)?; - self.block_cache - .lock() - .put_blobs(*block_root, blobs.clone()); - Ok(Some(blobs)) + // We insert a VariableList of BlobSidecars into the db, but retrieve + // a plain vec since we don't know the length limit of the list without + // knowing the slot. + // The encoding of a VariableList is the same as a regular vec. + let blobs: Vec>> = Vec::<_>::from_ssz_bytes(blobs_bytes)?; + if let Some(max_blobs_per_block) = blobs + .first() + .map(|blob| self.spec.max_blobs_per_block(blob.epoch())) + { + let blobs = BlobSidecarList::from_vec(blobs, max_blobs_per_block as usize); + self.block_cache + .lock() + .put_blobs(*block_root, blobs.clone()); + + Ok(BlobSidecarListFromRoot::Blobs(blobs)) + } else { + // This always implies that there were no blobs for this block_root + Ok(BlobSidecarListFromRoot::NoBlobs) + } } - None => Ok(None), + None => Ok(BlobSidecarListFromRoot::NoRoot), } } diff --git a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs index 5c60aa8d7e3..097b069a665 100644 --- a/beacon_node/store/src/impls/execution_payload.rs +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -1,7 +1,7 @@ use crate::{DBColumn, Error, StoreItem}; use ssz::{Decode, Encode}; use types::{ - BlobSidecarList, EthSpec, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + EthSpec, ExecutionPayload, ExecutionPayloadBellatrix, 
ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, }; @@ -27,7 +27,6 @@ impl_store_item!(ExecutionPayloadCapella); impl_store_item!(ExecutionPayloadDeneb); impl_store_item!(ExecutionPayloadElectra); impl_store_item!(ExecutionPayloadFulu); -impl_store_item!(BlobSidecarList); /// This fork-agnostic implementation should be only used for writing. /// diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 09ae9a32dd0..1458fa846c6 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -7,6 +7,7 @@ //! //! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See //! tests for implementation examples. +pub mod blob_sidecar_list_from_root; pub mod chunked_iter; pub mod chunked_vector; pub mod config; @@ -28,6 +29,7 @@ pub mod state_cache; pub mod iter; +pub use self::blob_sidecar_list_from_root::BlobSidecarListFromRoot; pub use self::config::StoreConfig; pub use self::consensus_context::OnDiskConsensusContext; pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index a107f6147ae..a303bea2681 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -135,6 +135,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index f71984059aa..68d2b0eafe2 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ 
b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -118,6 +118,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index 6d344b5b524..930ce0a1bcb 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -124,6 +124,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 244ddd564d2..638f6fe42f8 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -141,6 +141,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 88f8359bd13..38185188976 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -119,6 +119,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/consensus/state_processing/src/per_block_processing.rs 
b/consensus/state_processing/src/per_block_processing.rs index 22e0a5eab30..782dbe2a547 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -391,10 +391,12 @@ pub fn partially_verify_execution_payload = VariableList::MaxBlobCommitmentsPerBlock>; -pub type KzgCommitmentOpts = - FixedVector, ::MaxBlobsPerBlock>; /// The number of leaves (including padding) on the `BeaconBlockBody` Merkle tree. /// diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 302aa2a4c18..ff4555747c6 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,10 +1,10 @@ use crate::test_utils::TestRandom; use crate::{ - beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, - Epoch, EthSpec, FixedVector, Hash256, SignedBeaconBlockHeader, Slot, VariableList, + beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, AbstractExecPayload, BeaconBlockHeader, + BeaconStateError, Blob, ChainSpec, Epoch, EthSpec, FixedVector, ForkName, + ForkVersionDeserialize, Hash256, KzgProofs, RuntimeFixedVector, RuntimeVariableList, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, VariableList, }; -use crate::{AbstractExecPayload, ForkName}; -use crate::{ForkVersionDeserialize, KzgProofs, SignedBeaconBlock}; use bls::Signature; use derivative::Derivative; use kzg::{Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT}; @@ -30,19 +30,6 @@ pub struct BlobIdentifier { pub index: u64, } -impl BlobIdentifier { - pub fn get_all_blob_ids(block_root: Hash256) -> Vec { - let mut blob_ids = Vec::with_capacity(E::max_blobs_per_block()); - for i in 0..E::max_blobs_per_block() { - blob_ids.push(BlobIdentifier { - block_root, - index: i as u64, - }); - } - blob_ids - } -} - impl PartialOrd for BlobIdentifier { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) @@ -291,19 +278,23 @@ impl 
BlobSidecar { blobs: BlobsList, block: &SignedBeaconBlock, kzg_proofs: KzgProofs, + spec: &ChainSpec, ) -> Result, BlobSidecarError> { let mut blob_sidecars = vec![]; for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { let blob_sidecar = BlobSidecar::new(i, blob, block, *kzg_proof)?; blob_sidecars.push(Arc::new(blob_sidecar)); } - Ok(VariableList::from(blob_sidecars)) + Ok(RuntimeVariableList::from_vec( + blob_sidecars, + spec.max_blobs_per_block(block.epoch()) as usize, + )) } } -pub type BlobSidecarList = VariableList>, ::MaxBlobsPerBlock>; -pub type FixedBlobSidecarList = - FixedVector>>, ::MaxBlobsPerBlock>; +pub type BlobSidecarList = RuntimeVariableList>>; +/// Alias for a non length-constrained list of `BlobSidecar`s. +pub type FixedBlobSidecarList = RuntimeFixedVector>>>; pub type BlobsList = VariableList, ::MaxBlobCommitmentsPerBlock>; impl ForkVersionDeserialize for BlobSidecarList { diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index f0bfeba680b..65f4c37aa15 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -237,6 +237,7 @@ pub struct ChainSpec { pub max_request_data_column_sidecars: u64, pub min_epochs_for_blob_sidecars_requests: u64, pub blob_sidecar_subnet_count: u64, + max_blobs_per_block: u64, /* * Networking Derived @@ -616,6 +617,17 @@ impl ChainSpec { } } + /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for the fork at `epoch`. + pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 { + self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch)) + } + + /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`. 
+ pub fn max_blobs_per_block_by_fork(&self, _fork_name: ForkName) -> u64 { + // TODO(electra): add Electra blobs per block change here + self.max_blobs_per_block + } + pub fn data_columns_per_subnet(&self) -> usize { self.number_of_columns .safe_div(self.data_column_sidecar_subnet_count as usize) @@ -859,6 +871,7 @@ impl ChainSpec { max_request_data_column_sidecars: default_max_request_data_column_sidecars(), min_epochs_for_blob_sidecars_requests: default_min_epochs_for_blob_sidecars_requests(), blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), + max_blobs_per_block: default_max_blobs_per_block(), /* * Derived Deneb Specific @@ -1187,6 +1200,7 @@ impl ChainSpec { max_request_data_column_sidecars: default_max_request_data_column_sidecars(), min_epochs_for_blob_sidecars_requests: 16384, blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), + max_blobs_per_block: default_max_blobs_per_block(), /* * Derived Deneb Specific @@ -1388,6 +1402,9 @@ pub struct Config { #[serde(default = "default_blob_sidecar_subnet_count")] #[serde(with = "serde_utils::quoted_u64")] blob_sidecar_subnet_count: u64, + #[serde(default = "default_max_blobs_per_block")] + #[serde(with = "serde_utils::quoted_u64")] + max_blobs_per_block: u64, #[serde(default = "default_min_per_epoch_churn_limit_electra")] #[serde(with = "serde_utils::quoted_u64")] @@ -1523,6 +1540,12 @@ const fn default_blob_sidecar_subnet_count() -> u64 { 6 } +/// It's important to keep this consistent with the Deneb preset value for +/// `MAX_BLOBS_PER_BLOCK`, otherwise we might run into consensus issues.
+const fn default_max_blobs_per_block() -> u64 { + 6 +} + const fn default_min_per_epoch_churn_limit_electra() -> u64 { 128_000_000_000 } @@ -1745,6 +1768,7 @@ impl Config { max_request_data_column_sidecars: spec.max_request_data_column_sidecars, min_epochs_for_blob_sidecars_requests: spec.min_epochs_for_blob_sidecars_requests, blob_sidecar_subnet_count: spec.blob_sidecar_subnet_count, + max_blobs_per_block: spec.max_blobs_per_block, min_per_epoch_churn_limit_electra: spec.min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit: spec @@ -1822,6 +1846,7 @@ impl Config { max_request_data_column_sidecars, min_epochs_for_blob_sidecars_requests, blob_sidecar_subnet_count, + max_blobs_per_block, min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, @@ -1890,6 +1915,7 @@ impl Config { max_request_data_column_sidecars, min_epochs_for_blob_sidecars_requests, blob_sidecar_subnet_count, + max_blobs_per_block, min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index 57251e319a4..b2a050e9d5e 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -1,7 +1,7 @@ use crate::beacon_block_body::{KzgCommitments, BLOB_KZG_COMMITMENTS_INDEX}; use crate::test_utils::TestRandom; use crate::BeaconStateError; -use crate::{BeaconBlockHeader, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; +use crate::{BeaconBlockHeader, Epoch, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; use bls::Signature; use derivative::Derivative; use kzg::Error as KzgError; @@ -11,7 +11,6 @@ use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::typenum::Unsigned; use ssz_types::Error as SszError; use ssz_types::{FixedVector, VariableList}; use std::hash::Hash; @@ -68,6 +67,10 @@ impl 
DataColumnSidecar { self.signed_block_header.message.slot } + pub fn epoch(&self) -> Epoch { + self.slot().epoch(E::slots_per_epoch()) + } + pub fn block_root(&self) -> Hash256 { self.signed_block_header.message.tree_hash_root() } @@ -110,18 +113,16 @@ impl DataColumnSidecar { .len() } - pub fn max_size() -> usize { + pub fn max_size(max_blobs_per_block: usize) -> usize { Self { index: 0, - column: VariableList::new(vec![Cell::::default(); E::MaxBlobsPerBlock::to_usize()]) - .unwrap(), + column: VariableList::new(vec![Cell::::default(); max_blobs_per_block]).unwrap(), kzg_commitments: VariableList::new(vec![ KzgCommitment::empty_for_testing(); - E::MaxBlobsPerBlock::to_usize() + max_blobs_per_block ]) .unwrap(), - kzg_proofs: VariableList::new(vec![KzgProof::empty(); E::MaxBlobsPerBlock::to_usize()]) - .unwrap(), + kzg_proofs: VariableList::new(vec![KzgProof::empty(); max_blobs_per_block]).unwrap(), signed_block_header: SignedBeaconBlockHeader { message: BeaconBlockHeader::empty(), signature: Signature::empty(), diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 23e82762096..976766dfa9d 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -4,8 +4,7 @@ use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ bit::B0, UInt, U0, U1, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U134217728, - U16, U16777216, U2, U2048, U256, U262144, U32, U4, U4096, U512, U6, U625, U64, U65536, U8, - U8192, + U16, U16777216, U2, U2048, U256, U262144, U32, U4, U4096, U512, U625, U64, U65536, U8, U8192, }; use ssz_types::typenum::{U17, U9}; use std::fmt::{self, Debug}; @@ -109,7 +108,6 @@ pub trait EthSpec: /* * New in Deneb */ - type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; type MaxBlobCommitmentsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + 
PartialEq; type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq; @@ -281,11 +279,6 @@ pub trait EthSpec: Self::MaxWithdrawalsPerPayload::to_usize() } - /// Returns the `MAX_BLOBS_PER_BLOCK` constant for this specification. - fn max_blobs_per_block() -> usize { - Self::MaxBlobsPerBlock::to_usize() - } - /// Returns the `MAX_BLOB_COMMITMENTS_PER_BLOCK` constant for this specification. fn max_blob_commitments_per_block() -> usize { Self::MaxBlobCommitmentsPerBlock::to_usize() @@ -421,7 +414,6 @@ impl EthSpec for MainnetEthSpec { type GasLimitDenominator = U1024; type MinGasLimit = U5000; type MaxExtraDataBytes = U32; - type MaxBlobsPerBlock = U6; type MaxBlobCommitmentsPerBlock = U4096; type BytesPerFieldElement = U32; type FieldElementsPerBlob = U4096; @@ -505,7 +497,6 @@ impl EthSpec for MinimalEthSpec { MinGasLimit, MaxExtraDataBytes, MaxBlsToExecutionChanges, - MaxBlobsPerBlock, BytesPerFieldElement, PendingDepositsLimit, MaxPendingDepositsPerEpoch, @@ -559,7 +550,6 @@ impl EthSpec for GnosisEthSpec { type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch type MaxBlsToExecutionChanges = U16; type MaxWithdrawalsPerPayload = U8; - type MaxBlobsPerBlock = U6; type MaxBlobCommitmentsPerBlock = U4096; type FieldElementsPerBlob = U4096; type BytesPerFieldElement = U32; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 282f27a5179..54d8bf51b6a 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -108,6 +108,7 @@ pub mod data_column_sidecar; pub mod data_column_subnet_id; pub mod light_client_header; pub mod non_zero_usize; +pub mod runtime_fixed_vector; pub mod runtime_var_list; pub use crate::activation_queue::ActivationQueue; @@ -223,6 +224,7 @@ pub use crate::preset::{ pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; +pub use 
crate::runtime_fixed_vector::RuntimeFixedVector; pub use crate::runtime_var_list::RuntimeVariableList; pub use crate::selection_proof::SelectionProof; pub use crate::shuffling_id::AttestationShufflingId; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index f8b36654093..f64b7051e5f 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -205,8 +205,6 @@ impl CapellaPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct DenebPreset { - #[serde(with = "serde_utils::quoted_u64")] - pub max_blobs_per_block: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_blob_commitments_per_block: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -216,7 +214,6 @@ pub struct DenebPreset { impl DenebPreset { pub fn from_chain_spec(_spec: &ChainSpec) -> Self { Self { - max_blobs_per_block: E::max_blobs_per_block() as u64, max_blob_commitments_per_block: E::max_blob_commitments_per_block() as u64, field_elements_per_blob: E::field_elements_per_blob() as u64, } diff --git a/consensus/types/src/runtime_fixed_vector.rs b/consensus/types/src/runtime_fixed_vector.rs new file mode 100644 index 00000000000..2b08b7bf702 --- /dev/null +++ b/consensus/types/src/runtime_fixed_vector.rs @@ -0,0 +1,81 @@ +//! Emulates a fixed size array but with the length set at runtime. +//! +//! The length of the list cannot be changed once it is set. 
+ +#[derive(Clone, Debug)] +pub struct RuntimeFixedVector { + vec: Vec, + len: usize, +} + +impl RuntimeFixedVector { + pub fn new(vec: Vec) -> Self { + let len = vec.len(); + Self { vec, len } + } + + pub fn to_vec(&self) -> Vec { + self.vec.clone() + } + + pub fn as_slice(&self) -> &[T] { + self.vec.as_slice() + } + + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + self.len + } + + pub fn into_vec(self) -> Vec { + self.vec + } + + pub fn default(max_len: usize) -> Self { + Self { + vec: vec![T::default(); max_len], + len: max_len, + } + } + + pub fn take(&mut self) -> Self { + let new = std::mem::take(&mut self.vec); + *self = Self::new(vec![T::default(); self.len]); + Self { + vec: new, + len: self.len, + } + } +} + +impl std::ops::Deref for RuntimeFixedVector { + type Target = [T]; + + fn deref(&self) -> &[T] { + &self.vec[..] + } +} + +impl std::ops::DerefMut for RuntimeFixedVector { + fn deref_mut(&mut self) -> &mut [T] { + &mut self.vec[..] + } +} + +impl IntoIterator for RuntimeFixedVector { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.vec.into_iter() + } +} + +impl<'a, T> IntoIterator for &'a RuntimeFixedVector { + type Item = &'a T; + type IntoIter = std::slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.vec.iter() + } +} diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs index 8290876fa1f..857073b3b84 100644 --- a/consensus/types/src/runtime_var_list.rs +++ b/consensus/types/src/runtime_var_list.rs @@ -2,7 +2,7 @@ use derivative::Derivative; use serde::{Deserialize, Serialize}; use ssz::Decode; use ssz_types::Error; -use std::ops::{Deref, DerefMut, Index, IndexMut}; +use std::ops::{Deref, Index, IndexMut}; use std::slice::SliceIndex; /// Emulates a SSZ `List`. 
@@ -10,6 +10,8 @@ use std::slice::SliceIndex; /// An ordered, heap-allocated, variable-length, homogeneous collection of `T`, with no more than /// `max_len` values. /// +/// To ensure there are no inconsistent states, we do not allow any mutating operation if `max_len` is not set. +/// /// ## Example /// /// ``` @@ -35,6 +37,7 @@ use std::slice::SliceIndex; /// /// // Push a value to if it _does_ exceed the maximum. /// assert!(long.push(6).is_err()); +/// /// ``` #[derive(Debug, Clone, Serialize, Deserialize, Derivative)] #[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))] @@ -65,7 +68,7 @@ impl RuntimeVariableList { Self { vec, max_len } } - /// Create an empty list. + /// Create an empty list with the given `max_len`. pub fn empty(max_len: usize) -> Self { Self { vec: vec![], @@ -77,6 +80,10 @@ impl RuntimeVariableList { self.vec.as_slice() } + pub fn as_mut_slice(&mut self) -> &mut [T] { + self.vec.as_mut_slice() + } + /// Returns the number of values presently in `self`. pub fn len(&self) -> usize { self.vec.len() @@ -88,6 +95,8 @@ impl RuntimeVariableList { } /// Returns the type-level maximum length. + /// + /// Returns `None` if self is uninitialized with a max_len. pub fn max_len(&self) -> usize { self.max_len } @@ -169,12 +178,6 @@ impl Deref for RuntimeVariableList { } } -impl DerefMut for RuntimeVariableList { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] 
- } -} - impl<'a, T> IntoIterator for &'a RuntimeVariableList { type Item = &'a T; type IntoIter = std::slice::Iter<'a, T>; diff --git a/lcli/src/mock_el.rs b/lcli/src/mock_el.rs index 7719f02aa33..2e2c27a2dbf 100644 --- a/lcli/src/mock_el.rs +++ b/lcli/src/mock_el.rs @@ -9,6 +9,7 @@ use execution_layer::{ }; use std::net::Ipv4Addr; use std::path::PathBuf; +use std::sync::Arc; use types::*; pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { @@ -22,7 +23,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< let osaka_time = parse_optional(matches, "osaka-time")?; let handle = env.core_context().executor.handle().unwrap(); - let spec = &E::default_spec(); + let spec = Arc::new(E::default_spec()); let jwt_key = JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(); std::fs::write(jwt_path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); @@ -41,7 +42,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< osaka_time, }; let kzg = None; - let server: MockServer = MockServer::new_with_config(&handle, config, kzg); + let server: MockServer = MockServer::new_with_config(&handle, config, spec, kzg); if all_payloads_valid { eprintln!( diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index ac01c84b9d0..6e632ccf549 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -7,6 +7,7 @@ use environment::RuntimeContext; use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, Timeouts}; use sensitive_url::SensitiveUrl; use std::path::PathBuf; +use std::sync::Arc; use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; use tempfile::{Builder as TempBuilder, TempDir}; @@ -248,8 +249,14 @@ impl LocalExecutionNode { if let Err(e) = std::fs::write(jwt_file_path, config.jwt_key.hex_string()) { panic!("Failed to write jwt file {}", e); } + let spec = Arc::new(E::default_spec()); Self { - server: MockServer::new_with_config(&context.executor.handle().unwrap(), config, 
None), + server: MockServer::new_with_config( + &context.executor.handle().unwrap(), + config, + spec, + None, + ), datadir, } }