Resolve more todos
dapplion committed Mar 20, 2024
1 parent b36b430 commit ea4fcef
Showing 9 changed files with 17 additions and 197 deletions.
57 changes: 0 additions & 57 deletions beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -211,63 +211,6 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
}
}

pub fn get_missing_data_column_ids(
&self,
block_root: Hash256,
block: &Option<Arc<SignedBeaconBlock<T::EthSpec>>>,
) -> MissingDataColumns {
let Some(current_slot) = self.slot_clock.now_or_genesis() else {
error!(
self.log,
"Failed to read slot clock when checking for missing blob ids"
);
return MissingDataColumns::NotRequired;
};

if !self.da_check_required_for_epoch(current_slot.epoch(T::EthSpec::slots_per_epoch())) {
return MissingDataColumns::NotRequired;
}

// Compute ids for sampling
let mut sampling_ids = self
.compute_sampling_column_ids(block_root)
.into_iter()
.map(|index| BlobIdentifier {
block_root,
index: index as u64,
})
.collect::<Vec<_>>();

// Compute ids for custody
match block {
Some(cached_block) => {
sampling_ids.extend_from_slice(
&self
.compute_custody_column_ids(cached_block.slot())
.into_iter()
.map(|index| BlobIdentifier {
block_root,
index: index as u64,
})
.collect::<Vec<_>>(),
);
MissingDataColumns::KnownMissing(sampling_ids)
}
// TODO(das): without knowledge of the block's slot you can't know your custody
// requirements. When syncing a block via single block lookup, you need to fetch the
// block first, then fetch your custody columns
None => MissingDataColumns::KnownMissingIncomplete(sampling_ids),
}
}

fn compute_sampling_column_ids(&self, block_root: Hash256) -> Vec<usize> {
todo!("Use local randomness to derive this block's sample column ids")
}

fn compute_custody_column_ids(&self, slot: Slot) -> Vec<usize> {
todo!("Use local node ID and custody parameter to compute column ids for this slot");
}
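The two stubs removed above only hint at the intended derivation: sampling columns from local randomness, custody columns from the local node ID and a custody parameter. A minimal, self-contained sketch of the custody-side idea follows; it is not Lighthouse's implementation, and `NUMBER_OF_COLUMNS`, the hash-based selection, and all names are assumptions for illustration. The sampling-side stub would be analogous, keyed on per-block local randomness rather than the node ID.

```rust
use std::collections::{hash_map::DefaultHasher, BTreeSet};
use std::hash::{Hash, Hasher};

// Assumed column count for illustration; the real value comes from the chain spec.
const NUMBER_OF_COLUMNS: u64 = 128;

/// Hypothetical sketch: deterministically pick `custody_requirement` distinct column
/// indices from a raw node ID by hashing (node_id, counter) until enough are found.
fn custody_column_ids(node_id: [u8; 32], custody_requirement: u64) -> Vec<u64> {
    let target = custody_requirement.min(NUMBER_OF_COLUMNS);
    let mut columns = BTreeSet::new();
    let mut counter: u64 = 0;
    while (columns.len() as u64) < target {
        let mut hasher = DefaultHasher::new();
        (node_id, counter).hash(&mut hasher);
        columns.insert(hasher.finish() % NUMBER_OF_COLUMNS);
        counter += 1;
    }
    columns.into_iter().collect()
}

fn main() {
    // Example: a node custodying 4 columns derives the same set on every call.
    println!("{:?}", custody_column_ids([7u8; 32], 4));
}
```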

/// Get a blob from the availability cache.
pub fn get_blob(
&self,
@@ -28,7 +28,7 @@
//! the cache when they are accessed.
use super::state_lru_cache::{DietAvailabilityPendingExecutedBlock, StateLRUCache};
use super::{CustodyConfig, NodeIdRaw};
use super::CustodyConfig;
use crate::beacon_chain::BeaconStore;
use crate::blob_verification::KzgVerifiedBlob;
use crate::block_verification_types::{
@@ -47,7 +47,7 @@ use std::num::NonZeroUsize;
use std::{collections::HashSet, sync::Arc};
use types::blob_sidecar::BlobIdentifier;
use types::data_column_sidecar::{ColumnIndex, DataColumnIdentifier};
use types::{BlobSidecar, ChainSpec, DataColumnSidecar, Epoch, EthSpec, ForkName, Hash256, Slot};
use types::{BlobSidecar, ChainSpec, DataColumnSidecar, Epoch, EthSpec, Hash256, Slot};

/// This represents the components of a partially available block
///
@@ -80,50 +80,13 @@ impl<T: EthSpec> PendingComponents<T> {
.map(|b| b.as_block().message().slot())
}

/// Checks if a block exists in the cache.
///
/// Returns:
/// - `true` if a block exists.
/// - `false` otherwise.
fn block_exists(&self) -> bool {
self.executed_block.is_some()
}

/// Checks if a blob exists at the given index in the cache.
///
/// Returns:
/// - `true` if a blob exists at the given index.
/// - `false` otherwise.
fn blob_exists(&self, blob_index: usize) -> bool {
self.verified_blobs
.get(blob_index)
.map(|b| b.is_some())
.unwrap_or(false)
}

fn data_column_exists(&self, data_colum_index: ColumnIndex) -> bool {
self.verified_data_columns
.iter()
.find(|c| c.data_column_index() == data_colum_index)
.is_some()
}

/// Returns the number of blobs that are expected to be present. Returns `None` if we don't have a
/// block.
///
/// This corresponds to the number of commitments that are present in a block.
fn num_expected_blobs(&self) -> Option<usize> {
self.executed_block.as_ref().map(|b| {
b.as_block()
.message()
.body()
.blob_kzg_commitments()
.cloned()
.unwrap_or_default()
.len()
})
}

/// Returns the number of blobs that have been received and are stored in the cache.
fn num_received_blobs(&self) -> usize {
self.verified_blobs.iter().flatten().count()
@@ -3,7 +3,6 @@ use super::score::{PeerAction, Score, ScoreState};
use super::sync_status::SyncStatus;
use crate::discovery::Eth2Enr;
use crate::{rpc::MetaData, types::Subnet};
use discv5::enr::NodeId;
use discv5::Enr;
use libp2p::core::multiaddr::{Multiaddr, Protocol};
use serde::{
10 changes: 0 additions & 10 deletions beacon_node/network/src/network_beacon_processor/mod.rs
@@ -502,16 +502,6 @@ impl<T: BeaconChainTypes> NetworkBeaconProcessor<T> {
})
}

pub fn sampling_completed(self: &Arc<Self>, block_root: Hash256) {
todo!("send work event to process sampling result");

// Sync handles these results
// self.send_sync_message(SyncMessage::SampleProcessed {
// id,
// result: result.into(),
// });
}
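The removed stub above, together with its commented-out body, points at the intended flow: once sampling for a block completes, the beacon processor hands the result back to sync, which owns the lookup state. A self-contained sketch of that hand-off, using a plain channel and stand-in types rather than Lighthouse's actual `SyncMessage` plumbing:

```rust
use std::sync::mpsc;

// Stand-in types for illustration only.
type Hash256 = [u8; 32];

enum SyncMessage {
    SampleProcessed { block_root: Hash256, success: bool },
}

struct NetworkBeaconProcessor {
    sync_tx: mpsc::Sender<SyncMessage>,
}

impl NetworkBeaconProcessor {
    /// Hypothetical sketch: forward the outcome of sampling `block_root` to the
    /// sync layer, which reacts to it (e.g. retries or marks the lookup complete).
    fn sampling_completed(&self, block_root: Hash256, success: bool) {
        let _ = self.sync_tx.send(SyncMessage::SampleProcessed { block_root, success });
    }
}

fn main() {
    let (tx, rx) = mpsc::channel();
    let processor = NetworkBeaconProcessor { sync_tx: tx };
    processor.sampling_completed([0u8; 32], true);
    assert!(matches!(rx.recv().unwrap(), SyncMessage::SampleProcessed { .. }));
}
```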

/// Create a new work event to import `blocks` as a beacon chain segment.
pub fn send_chain_segment(
self: &Arc<Self>,
6 changes: 2 additions & 4 deletions beacon_node/network/src/sync/block_lookups/mod.rs
@@ -11,12 +11,11 @@ pub use crate::sync::block_lookups::single_block_lookup::{
CachedChild, ColumnRequestState, LookupRequestError,
};
use crate::sync::manager::{Id, SingleLookupReqId};
use crate::sync::network_context::PeersByCustody;
use beacon_chain::block_verification_types::{AsBlock, RpcBlock};
pub use beacon_chain::data_availability_checker::ChildComponents;
use beacon_chain::data_availability_checker::{
compute_custody_requirements, AvailabilityCheckErrorCategory, CustodyConfig,
DataAvailabilityChecker, NodeIdRaw,
DataAvailabilityChecker,
};
use beacon_chain::validator_monitor::timestamp_now;
use beacon_chain::{AvailabilityProcessingStatus, BeaconChainTypes, BlockError};
@@ -36,7 +35,7 @@ use std::sync::Arc;
use std::time::Duration;
use store::Hash256;
use types::blob_sidecar::FixedBlobSidecarList;
use types::{DataColumnSidecar, EthSpec, Slot};
use types::{DataColumnSidecar, Slot};

pub mod common;
mod parent_lookup;
@@ -215,7 +214,6 @@ impl<T: BeaconChainTypes> BlockLookups<T> {
child_components,
peers,
self.da_checker.clone(),
PeersByCustody::new(),
cx.next_id(),
);

@@ -72,7 +72,6 @@ impl<T: BeaconChainTypes> ParentLookup<T> {
Some(ChildComponents::empty(block_root)),
&[peer_id],
da_checker,
cx.peers_by_custody(),
cx.next_id(),
);

80 changes: 12 additions & 68 deletions beacon_node/network/src/sync/block_lookups/single_block_lookup.rs
@@ -1,7 +1,7 @@
use super::PeerId;
use crate::sync::block_lookups::common::{Lookup, RequestState};
use crate::sync::block_lookups::Id;
use crate::sync::network_context::{PeersByCustody, SyncNetworkContext};
use crate::sync::network_context::SyncNetworkContext;
use beacon_chain::block_verification_types::RpcBlock;
use beacon_chain::data_availability_checker::{
compute_custody_requirements, compute_sample_requirements, AvailabilityCheckError,
@@ -70,30 +70,9 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
child_components: Option<ChildComponents<T::EthSpec>>,
peers: &[PeerId],
da_checker: Arc<DataAvailabilityChecker<T>>,
peer_by_custody: PeersByCustody,
id: Id,
) -> Self {
let is_deneb = da_checker.is_deneb();
let missing_column_ids = missing_data_column_ids(
requested_block_root,
child_components.as_ref(),
da_checker.clone(),
);

let columns_request_state = missing_column_ids
.indices()
.iter()
.map(|index| {
ColumnRequestState::<L, T::EthSpec>::new(
DataColumnIdentifier {
block_root: requested_block_root,
index: *index,
},
// Initially no peers; slot unknown
&vec![],
)
})
.collect::<Vec<_>>();

Self {
id,
@@ -227,10 +206,16 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
return CachedChild::DownloadIncomplete;
};

if !self.missing_blob_ids().is_empty() || !self.missing_data_column_ids().is_empty() {
if !self.missing_blob_ids().is_empty() {
return CachedChild::DownloadIncomplete;
}

// TODO(das): consider column custody and sampling. I don't want to duplicate logic, and
// it's odd that child component download has different paths than regular lookups. It
// feels like this can be simplified by unifying both paths. The whole child block
// business is a regular download that should be delayed for processing until later, when
// all parents are known.

match RpcBlock::new_from_fixed(
self.block_request_state.requested_block_root,
block.clone(),
@@ -380,11 +365,6 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
self.blob_request_state.requested_ids = self.missing_blob_ids();
}

pub(crate) fn columns_already_downloaded(&mut self) -> bool {
// TODO(das) request_ids is not actually used now
todo!("deduplicate requests properly");
}

/// If `child_components` is `Some`, we know block components won't hit the data
/// availability cache, so we don't check its processing cache unless `child_components`
/// is `None`.
@@ -410,14 +390,6 @@ impl<L: Lookup, T: BeaconChainTypes> SingleBlockLookup<L, T> {
}
}

pub(crate) fn missing_data_column_ids(&self) -> MissingDataColumns {
missing_data_column_ids(
self.block_root(),
self.child_components.as_ref(),
self.da_checker.clone(),
)
}

/// Penalizes a blob peer if it should have blobs but didn't return them to us.
pub fn penalize_blob_peer(&mut self, cx: &SyncNetworkContext<T>) {
if let Ok(blob_peer) = self.blob_request_state.state.processing_peer() {
@@ -459,19 +431,6 @@ pub enum ColumnsRequestState<L: Lookup, E: EthSpec> {
},
}

fn missing_data_column_ids<T: BeaconChainTypes>(
block_root: Hash256,
child_components: Option<&ChildComponents<T::EthSpec>>,
da_checker: Arc<DataAvailabilityChecker<T>>,
) -> MissingDataColumns {
if let Some(components) = child_components {
da_checker.get_missing_data_column_ids(block_root, &components.downloaded_block)
} else {
// TODO(das): same code as for missing_blob_ids?
todo!()
}
}

/// The state of the blob request component of a `SingleBlockLookup`.
pub struct BlobRequestState<L: Lookup, T: EthSpec> {
/// The latest picture of which blobs still need to be requested. This includes information
@@ -759,15 +718,8 @@ mod tests {
HotColdDB::open_ephemeral(StoreConfig::default(), ChainSpec::minimal(), log.clone())
.expect("store");
let da_checker = Arc::new(
DataAvailabilityChecker::new(
slot_clock,
None,
store.into(),
&log,
spec.clone(),
NODE_ID,
)
.expect("data availability checker"),
DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec.clone())
.expect("data availability checker"),
);
let mut sl = SingleBlockLookup::<TestLookup1, T>::new(
block.canonical_root(),
@@ -809,23 +761,15 @@
.expect("store");

let da_checker = Arc::new(
DataAvailabilityChecker::new(
slot_clock,
None,
store.into(),
&log,
spec.clone(),
NODE_ID,
)
.expect("data availability checker"),
DataAvailabilityChecker::new(slot_clock, None, store.into(), &log, spec.clone())
.expect("data availability checker"),
);

let mut sl = SingleBlockLookup::<TestLookup2, T>::new(
block.canonical_root(),
None,
&[peer_id],
da_checker,
PeersByCustody::new(),
1,
);
for _ in 1..TestLookup2::MAX_ATTEMPTS {
16 changes: 0 additions & 16 deletions beacon_node/network/src/sync/network_context.rs
@@ -91,18 +91,6 @@ impl<T: EthSpec> From<Option<Arc<BlobSidecar<T>>>> for BlockOrBlob<T> {
}
}

pub struct PeersByCustody {}

impl PeersByCustody {
pub fn new() -> Self {
Self {}
}

pub fn custodial_peers(&self, slot: Slot, column_id: u64) -> Vec<PeerId> {
todo!();
}
}
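The removed `PeersByCustody` placeholder never got past `todo!()`. The idea it names, resolving which connected peers are expected to custody a given column, can be sketched as a simple map lookup. This is illustrative only: the `slot` argument is dropped, `PeerId` is a stand-in, and the real query would go through the peer DB rather than a plain `HashMap`.

```rust
use std::collections::HashMap;

// Stand-in peer identifier for illustration.
type PeerId = u64;

/// Hypothetical sketch of the removed `PeersByCustody` idea: index each known
/// peer's advertised custody columns and answer "which peers should hold column X?".
struct PeersByCustody {
    custody_columns_by_peer: HashMap<PeerId, Vec<u64>>,
}

impl PeersByCustody {
    fn custodial_peers(&self, column_id: u64) -> Vec<PeerId> {
        self.custody_columns_by_peer
            .iter()
            .filter(|(_, columns)| columns.contains(&column_id))
            .map(|(peer, _)| *peer)
            .collect()
    }
}

fn main() {
    let peers = PeersByCustody {
        custody_columns_by_peer: HashMap::from([(1, vec![0, 5]), (2, vec![5, 9])]),
    };
    // Both peers advertise custody of column 5.
    println!("{:?}", peers.custodial_peers(5));
}
```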

fn custody_config_from_peer_info<E: EthSpec>(peer_info: &PeerInfo<E>) -> Option<CustodyConfig> {
peer_info
.node_id()
@@ -134,10 +122,6 @@ impl<T: BeaconChainTypes> SyncNetworkContext<T> {
&self.network_beacon_processor.network_globals
}

pub fn peers_by_custody(&self) -> PeersByCustody {
todo!();
}

pub fn get_custody_config_of_peer(&self, peer_id: &PeerId) -> Option<CustodyConfig> {
self.network_globals()
.peers
2 changes: 1 addition & 1 deletion consensus/types/src/data_column_subnet_id.rs
@@ -1,5 +1,5 @@
//! Identifies each data column subnet by an integer identifier.
use crate::{ChainSpec, EthSpec};
use crate::EthSpec;
use ethereum_types::U256;
use safe_arith::{ArithError, SafeArith};
use serde::{Deserialize, Serialize};