Commit eefa6e5

Merge remote-tracking branch 'origin/unstable' into anchor_slot_pruning

michaelsproul committed Jan 10, 2025
2 parents 7c414cc + a244aa3

Showing 71 changed files with 354 additions and 186 deletions.
2 changes: 2 additions & 0 deletions .github/CODEOWNERS
@@ -0,0 +1,2 @@
+beacon_node/network/ @jxs
+beacon_node/lighthouse_network/ @jxs
6 changes: 5 additions & 1 deletion .github/workflows/test-suite.yml
@@ -350,7 +350,7 @@ jobs:
       - name: Check formatting with cargo fmt
         run: make cargo-fmt
       - name: Lint code for quality and style with Clippy
-        run: make lint
+        run: make lint-full
       - name: Certify Cargo.lock freshness
         run: git diff --exit-code Cargo.lock
       - name: Typecheck benchmark code without running it
@@ -392,6 +392,10 @@ jobs:
           cache: false
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+      - name: Fetch libssl1.1
+        run: wget https://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb
+      - name: Install libssl1.1
+        run: sudo dpkg -i libssl1.1_1.1.1f-1ubuntu2_amd64.deb
       - name: Create Cargo config dir
         run: mkdir -p .cargo
       - name: Install custom Cargo config
6 changes: 5 additions & 1 deletion Makefile
@@ -204,7 +204,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine
 # Lints the code for bad style and potentially unsafe arithmetic using Clippy.
 # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints.
 lint:
-	RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \
+	cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \
 		-D clippy::fn_to_numeric_cast_any \
 		-D clippy::manual_let_else \
 		-D clippy::large_stack_frames \
@@ -220,6 +220,10 @@ lint:
 lint-fix:
 	EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint

+# Also run the lints on the optimized-only tests
+lint-full:
+	RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" $(MAKE) lint
+
 # Runs the makefile in the `ef_tests` repo.
 #
 # May download and extract an archive of test vectors from the ethereum
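The new `lint-full` target differs from `lint` only in setting `-C debug-assertions=no`, which changes which code rustc and Clippy actually compile and therefore lint. A minimal sketch of why both passes see different code — this is an illustration, not code from the Lighthouse tree, and it assumes the default `cargo clippy` dev profile keeps debug assertions on:

    // Items gated on `debug_assertions` are compiled -- and linted --
    // in only one of the two configurations.
    #[cfg(debug_assertions)]
    fn add_one(x: u64) -> u64 {
        // Seen by a plain `make lint` (debug assertions on).
        x.checked_add(1).expect("overflow")
    }

    #[cfg(not(debug_assertions))]
    fn add_one(x: u64) -> u64 {
        // Seen only under RUSTFLAGS="-C debug-assertions=no",
        // i.e. by the new `make lint-full` target.
        x.wrapping_add(1)
    }

    fn main() {
        println!("{}", add_one(41));
    }

Running `lint-full` in CI (the test-suite.yml change above) keeps the optimized-only configuration from rotting unlinted.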
10 changes: 5 additions & 5 deletions beacon_node/beacon_chain/src/beacon_chain.rs
@@ -573,7 +573,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
             .start_slot(T::EthSpec::slots_per_epoch());
         let is_canonical = self
             .block_root_at_slot(block_slot, WhenSlotSkipped::None)?
-            .map_or(false, |canonical_root| block_root == &canonical_root);
+            .is_some_and(|canonical_root| block_root == &canonical_root);
         Ok(block_slot <= finalized_slot && is_canonical)
     }
@@ -604,7 +604,7 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
         let slot_is_finalized = state_slot <= finalized_slot;
         let canonical = self
             .state_root_at_slot(state_slot)?
-            .map_or(false, |canonical_root| state_root == &canonical_root);
+            .is_some_and(|canonical_root| state_root == &canonical_root);
         Ok(FinalizationAndCanonicity {
             slot_is_finalized,
             canonical,
@@ -5118,9 +5118,9 @@ impl<T: BeaconChainTypes> BeaconChain<T> {
                 .start_of(slot)
                 .unwrap_or_else(|| Duration::from_secs(0)),
         );
-        block_delays.observed.map_or(false, |delay| {
-            delay >= self.slot_clock.unagg_attestation_production_delay()
-        })
+        block_delays
+            .observed
+            .is_some_and(|delay| delay >= self.slot_clock.unagg_attestation_production_delay())
    }

    /// Produce a block for some `slot` upon the given `state`.
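These hunks (and several files below) replace the `map_or(false, ...)` pattern with `Option::is_some_and`, stable since Rust 1.70 and preferred by Clippy. The two forms are equivalent; a self-contained sketch with made-up values:

    fn main() {
        let canonical_root: Option<u64> = Some(42);
        let block_root: u64 = 42;

        // Old pattern: default to `false` when the Option is `None`.
        let old = canonical_root.map_or(false, |root| root == block_root);

        // New pattern: reads as "is Some and the predicate holds".
        let new = canonical_root.is_some_and(|root| root == block_root);

        assert_eq!(old, new);
        // Both forms are `false` for `None`.
        assert!(!None::<u64>.is_some_and(|root| root == block_root));
    }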
6 changes: 1 addition & 5 deletions beacon_node/beacon_chain/src/canonical_head.rs
@@ -1254,11 +1254,7 @@ pub fn find_reorg_slot<E: EthSpec>(
     ($state: ident, $block_root: ident) => {
         std::iter::once(Ok(($state.slot(), $block_root)))
             .chain($state.rev_iter_block_roots(spec))
-            .skip_while(|result| {
-                result
-                    .as_ref()
-                    .map_or(false, |(slot, _)| *slot > lowest_slot)
-            })
+            .skip_while(|result| result.as_ref().is_ok_and(|(slot, _)| *slot > lowest_slot))
     };
 }
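Here the `Result` counterpart, `Result::is_ok_and` (also stable since Rust 1.70), is used: it returns `false` for `Err`, so the `skip_while` stops at the first error and the error is surfaced to the caller rather than silently skipped. A standalone sketch with invented slot values:

    fn main() {
        let lowest_slot: u64 = 10;
        let results: Vec<Result<(u64, &str), &str>> =
            vec![Ok((12, "a")), Ok((11, "b")), Ok((10, "c")), Ok((9, "d"))];

        // Skip entries whose slot is strictly above `lowest_slot`; an Err
        // would also end the skipping, since `is_ok_and` returns false for it.
        let remaining: Vec<_> = results
            .into_iter()
            .skip_while(|result| result.as_ref().is_ok_and(|(slot, _)| *slot > lowest_slot))
            .collect();

        assert_eq!(remaining, vec![Ok((10, "c")), Ok((9, "d"))]);
    }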
6 changes: 3 additions & 3 deletions beacon_node/beacon_chain/src/data_availability_checker.rs
@@ -519,13 +519,13 @@ impl<T: BeaconChainTypes> DataAvailabilityChecker<T> {
     /// Returns true if the given epoch lies within the da boundary and false otherwise.
     pub fn da_check_required_for_epoch(&self, block_epoch: Epoch) -> bool {
         self.data_availability_boundary()
-            .map_or(false, |da_epoch| block_epoch >= da_epoch)
+            .is_some_and(|da_epoch| block_epoch >= da_epoch)
     }

     /// Returns `true` if the current epoch is greater than or equal to the `Deneb` epoch.
     pub fn is_deneb(&self) -> bool {
-        self.slot_clock.now().map_or(false, |slot| {
-            self.spec.deneb_fork_epoch.map_or(false, |deneb_epoch| {
+        self.slot_clock.now().is_some_and(|slot| {
+            self.spec.deneb_fork_epoch.is_some_and(|deneb_epoch| {
                 let now_epoch = slot.epoch(T::EthSpec::slots_per_epoch());
                 now_epoch >= deneb_epoch
             })
@@ -228,13 +228,10 @@ impl<E: EthSpec> PendingComponents<E> {
         );

         let all_blobs_received = block_kzg_commitments_count_opt
-            .map_or(false, |num_expected_blobs| {
-                num_expected_blobs == num_received_blobs
-            });
+            .is_some_and(|num_expected_blobs| num_expected_blobs == num_received_blobs);

-        let all_columns_received = expected_columns_opt.map_or(false, |num_expected_columns| {
-            num_expected_columns == num_received_columns
-        });
+        let all_columns_received = expected_columns_opt
+            .is_some_and(|num_expected_columns| num_expected_columns == num_received_columns);

         all_blobs_received || all_columns_received
     }
2 changes: 1 addition & 1 deletion beacon_node/beacon_chain/src/early_attester_cache.rs
@@ -145,7 +145,7 @@ impl<E: EthSpec> EarlyAttesterCache<E> {
         self.item
             .read()
             .as_ref()
-            .map_or(false, |item| item.beacon_block_root == block_root)
+            .is_some_and(|item| item.beacon_block_root == block_root)
     }

     /// Returns the block, if `block_root` matches the cached item.
2 changes: 1 addition & 1 deletion beacon_node/beacon_chain/src/eth1_chain.rs
@@ -153,7 +153,7 @@ fn get_sync_status<E: EthSpec>(
     // Lighthouse is "cached and ready" when it has cached enough blocks to cover the start of the
     // current voting period.
     let lighthouse_is_cached_and_ready =
-        latest_cached_block_timestamp.map_or(false, |t| t >= voting_target_timestamp);
+        latest_cached_block_timestamp.is_some_and(|t| t >= voting_target_timestamp);

     Some(Eth1SyncStatusData {
         head_block_number,
8 changes: 4 additions & 4 deletions beacon_node/beacon_chain/src/execution_payload.rs
@@ -127,9 +127,9 @@ impl<T: BeaconChainTypes> PayloadNotifier<T> {
 /// contains a few extra checks by running `partially_verify_execution_payload` first:
 ///
 /// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload
-async fn notify_new_payload<'a, T: BeaconChainTypes>(
+async fn notify_new_payload<T: BeaconChainTypes>(
     chain: &Arc<BeaconChain<T>>,
-    block: BeaconBlockRef<'a, T::EthSpec>,
+    block: BeaconBlockRef<'_, T::EthSpec>,
 ) -> Result<PayloadVerificationStatus, BlockError> {
     let execution_layer = chain
         .execution_layer
@@ -230,9 +230,9 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>(
 /// Equivalent to the `validate_merge_block` function in the merge Fork Choice Changes:
 ///
 /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/fork-choice.md#validate_merge_block
-pub async fn validate_merge_block<'a, T: BeaconChainTypes>(
+pub async fn validate_merge_block<T: BeaconChainTypes>(
     chain: &Arc<BeaconChain<T>>,
-    block: BeaconBlockRef<'a, T::EthSpec>,
+    block: BeaconBlockRef<'_, T::EthSpec>,
     allow_optimistic_import: AllowOptimisticImport,
 ) -> Result<(), BlockError> {
     let spec = &chain.spec;
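These signature changes drop a named lifetime parameter that appears only once, replacing it with the anonymous `'_` lifetime; the compiler infers exactly the same thing. A minimal sketch, using an invented type rather than the real `BeaconBlockRef`:

    struct BlockRef<'a> {
        root: &'a [u8; 32],
    }

    // A named lifetime used exactly once in the signature...
    fn is_zero_named<'a>(block: BlockRef<'a>) -> bool {
        block.root[0] == 0
    }

    // ...can be elided with `'_` without changing behaviour.
    fn is_zero_elided(block: BlockRef<'_>) -> bool {
        block.root[0] == 0
    }

    fn main() {
        let root = [0u8; 32];
        let a = is_zero_named(BlockRef { root: &root });
        let b = is_zero_elided(BlockRef { root: &root });
        assert_eq!(a, b);
    }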
7 changes: 2 additions & 5 deletions beacon_node/beacon_chain/src/graffiti_calculator.rs
@@ -293,10 +293,7 @@ mod tests {
             .await
             .unwrap();

-        let version_bytes = std::cmp::min(
-            lighthouse_version::VERSION.as_bytes().len(),
-            GRAFFITI_BYTES_LEN,
-        );
+        let version_bytes = std::cmp::min(lighthouse_version::VERSION.len(), GRAFFITI_BYTES_LEN);
         // grab the slice of the graffiti that corresponds to the lighthouse version
         let graffiti_slice =
             &harness.chain.graffiti_calculator.get_graffiti(None).await.0[..version_bytes];
@@ -361,7 +358,7 @@ mod tests {

         let graffiti_str = "nice graffiti bro";
         let mut graffiti_bytes = [0u8; GRAFFITI_BYTES_LEN];
-        graffiti_bytes[..graffiti_str.as_bytes().len()].copy_from_slice(graffiti_str.as_bytes());
+        graffiti_bytes[..graffiti_str.len()].copy_from_slice(graffiti_str.as_bytes());

         let found_graffiti = harness
             .chain
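Both hunks rely on `str::len` already returning the length in bytes (not characters), so the `as_bytes().len()` detour is redundant. A small sketch mirroring the graffiti truncation, with an invented version string:

    fn main() {
        const GRAFFITI_BYTES_LEN: usize = 32;
        let version = "Lighthouse/v0.0.0-example"; // invented for illustration

        // `str::len` is the byte length, identical to `as_bytes().len()`.
        assert_eq!(version.len(), version.as_bytes().len());

        // Copy at most GRAFFITI_BYTES_LEN bytes of the version string.
        let mut graffiti = [0u8; GRAFFITI_BYTES_LEN];
        let n = std::cmp::min(version.len(), GRAFFITI_BYTES_LEN);
        graffiti[..n].copy_from_slice(&version.as_bytes()[..n]);
    }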
143 changes: 132 additions & 11 deletions beacon_node/beacon_chain/src/kzg_utils.rs
@@ -7,8 +7,9 @@ use std::sync::Arc;
 use types::beacon_block_body::KzgCommitments;
 use types::data_column_sidecar::{Cell, DataColumn, DataColumnSidecarError};
 use types::{
-    Blob, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, Hash256,
-    KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader,
+    Blob, BlobSidecar, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecar,
+    DataColumnSidecarList, EthSpec, Hash256, KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock,
+    SignedBeaconBlockHeader, SignedBlindedBeaconBlock,
 };

 /// Converts a blob ssz List object to an array to be used with the kzg
@@ -243,6 +244,83 @@ fn build_data_column_sidecars<E: EthSpec>(
     Ok(sidecars)
 }

+/// Reconstruct blobs from a subset of data column sidecars (requires at least 50%).
+///
+/// If `blob_indices_opt` is `None`, this function attempts to reconstruct all blobs associated
+/// with the block.
+pub fn reconstruct_blobs<E: EthSpec>(
+    kzg: &Kzg,
+    data_columns: &[Arc<DataColumnSidecar<E>>],
+    blob_indices_opt: Option<Vec<u64>>,
+    signed_block: &SignedBlindedBeaconBlock<E>,
+) -> Result<BlobSidecarList<E>, String> {
+    // The data columns are from the database, so we assume their correctness.
+    let first_data_column = data_columns
+        .first()
+        .ok_or("data_columns should have at least one element".to_string())?;
+
+    let blob_indices: Vec<usize> = match blob_indices_opt {
+        Some(indices) => indices.into_iter().map(|i| i as usize).collect(),
+        None => {
+            let num_of_blobs = first_data_column.kzg_commitments.len();
+            (0..num_of_blobs).collect()
+        }
+    };
+
+    let blob_sidecars = blob_indices
+        .into_par_iter()
+        .map(|row_index| {
+            let mut cells: Vec<KzgCellRef> = vec![];
+            let mut cell_ids: Vec<u64> = vec![];
+            for data_column in data_columns {
+                let cell = data_column
+                    .column
+                    .get(row_index)
+                    .ok_or(format!("Missing data column at row index {row_index}"))
+                    .and_then(|cell| {
+                        ssz_cell_to_crypto_cell::<E>(cell).map_err(|e| format!("{e:?}"))
+                    })?;
+
+                cells.push(cell);
+                cell_ids.push(data_column.index);
+            }
+
+            let (cells, _kzg_proofs) = kzg
+                .recover_cells_and_compute_kzg_proofs(&cell_ids, &cells)
+                .map_err(|e| format!("Failed to recover cells and compute KZG proofs: {e:?}"))?;
+
+            let num_cells_original_blob = cells.len() / 2;
+            let blob_bytes = cells
+                .into_iter()
+                .take(num_cells_original_blob)
+                .flat_map(|cell| cell.into_iter())
+                .collect();
+
+            let blob = Blob::<E>::new(blob_bytes).map_err(|e| format!("{e:?}"))?;
+            let kzg_commitment = first_data_column
+                .kzg_commitments
+                .get(row_index)
+                .ok_or(format!("Missing KZG commitment for blob {row_index}"))?;
+            let kzg_proof = compute_blob_kzg_proof::<E>(kzg, &blob, *kzg_commitment)
+                .map_err(|e| format!("{e:?}"))?;
+
+            BlobSidecar::<E>::new_with_existing_proof(
+                row_index,
+                blob,
+                signed_block,
+                first_data_column.signed_block_header.clone(),
+                &first_data_column.kzg_commitments_inclusion_proof,
+                kzg_proof,
+            )
+            .map(Arc::new)
+            .map_err(|e| format!("{e:?}"))
+        })
+        .collect::<Result<Vec<_>, _>>()?
+        .into();
+
+    Ok(blob_sidecars)
+}
+
 /// Reconstruct all data columns from a subset of data column sidecars (requires at least 50%).
 pub fn reconstruct_data_columns<E: EthSpec>(
     kzg: &Kzg,
@@ -265,7 +343,7 @@ pub fn reconstruct_data_columns<E: EthSpec>(
         for data_column in data_columns {
             let cell = data_column.column.get(row_index).ok_or(
                 KzgError::InconsistentArrayLength(format!(
-                    "Missing data column at index {row_index}"
+                    "Missing data column at row index {row_index}"
                 )),
             )?;
@@ -289,12 +367,16 @@ pub fn reconstruct_data_columns<E: EthSpec>(

 #[cfg(test)]
 mod test {
-    use crate::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns};
+    use crate::kzg_utils::{
+        blobs_to_data_column_sidecars, reconstruct_blobs, reconstruct_data_columns,
+    };
     use bls::Signature;
+    use eth2::types::BlobsBundle;
+    use execution_layer::test_utils::generate_blobs;
     use kzg::{trusted_setup::get_trusted_setup, Kzg, KzgCommitment, TrustedSetup};
     use types::{
-        beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList,
-        ChainSpec, EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock,
+        beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, BlobsList, ChainSpec,
+        EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock,
     };

     type E = MainnetEthSpec;
@@ -308,6 +390,7 @@ mod test {
         test_build_data_columns_empty(&kzg, &spec);
         test_build_data_columns(&kzg, &spec);
         test_reconstruct_data_columns(&kzg, &spec);
+        test_reconstruct_blobs_from_data_columns(&kzg, &spec);
     }

     #[track_caller]
@@ -379,6 +462,36 @@ mod test {
         }
     }

+    #[track_caller]
+    fn test_reconstruct_blobs_from_data_columns(kzg: &Kzg, spec: &ChainSpec) {
+        let num_of_blobs = 6;
+        let (signed_block, blobs) = create_test_block_and_blobs::<E>(num_of_blobs, spec);
+        let blob_refs = blobs.iter().collect::<Vec<_>>();
+        let column_sidecars =
+            blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap();
+
+        // Now reconstruct
+        let signed_blinded_block = signed_block.into();
+        let blob_indices = vec![3, 4, 5];
+        let reconstructed_blobs = reconstruct_blobs(
+            kzg,
+            &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2],
+            Some(blob_indices.clone()),
+            &signed_blinded_block,
+        )
+        .unwrap();
+
+        for i in blob_indices {
+            let reconstructed_blob = &reconstructed_blobs
+                .iter()
+                .find(|sidecar| sidecar.index == i)
+                .map(|sidecar| sidecar.blob.clone())
+                .expect("reconstructed blob should exist");
+            let original_blob = blobs.get(i as usize).unwrap();
+            assert_eq!(reconstructed_blob, original_blob, "{i}");
+        }
+    }
+
     fn get_kzg() -> Kzg {
         let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice())
             .map_err(|e| format!("Unable to read trusted setup file: {}", e))
@@ -397,12 +510,20 @@ mod test {
             KzgCommitments::<E>::new(vec![KzgCommitment::empty_for_testing(); num_of_blobs])
                 .unwrap();

-        let signed_block = SignedBeaconBlock::from_block(block, Signature::empty());
+        let mut signed_block = SignedBeaconBlock::from_block(block, Signature::empty());

-        let blobs = (0..num_of_blobs)
-            .map(|_| Blob::<E>::default())
-            .collect::<Vec<_>>()
-            .into();
+        let (blobs_bundle, _) = generate_blobs::<E>(num_of_blobs).unwrap();
+        let BlobsBundle {
+            blobs,
+            commitments,
+            proofs: _,
+        } = blobs_bundle;
+
+        *signed_block
+            .message_mut()
+            .body_mut()
+            .blob_kzg_commitments_mut()
+            .unwrap() = commitments;

         (signed_block, blobs)
     }
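The `take(cells.len() / 2)` step in the new `reconstruct_blobs` reflects that cell recovery returns the full 2x erasure-extended cell set, of which the first half is the original blob data. A purely illustrative sketch of that slicing with toy byte values — no real KZG calls, and the cell sizes are invented:

    fn main() {
        // Pretend each of 8 recovered cells holds 4 bytes; the first 4 cells
        // (half of the extended set) carry the original blob data.
        let recovered_cells: Vec<Vec<u8>> = (0..8u8).map(|i| vec![i; 4]).collect();

        let num_cells_original_blob = recovered_cells.len() / 2;
        let blob_bytes: Vec<u8> = recovered_cells
            .into_iter()
            .take(num_cells_original_blob)
            .flat_map(|cell| cell.into_iter())
            .collect();

        assert_eq!(blob_bytes.len(), 4 * 4);
        assert_eq!(&blob_bytes[..4], &[0, 0, 0, 0]);
    }

This is also why reconstruction only needs at least 50% of the data column sidecars, as the doc comments above state.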