From 84519010f29b7f8ac50cb2e68ec6ffed69a6e6f2 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 7 Jan 2025 16:39:48 -0800 Subject: [PATCH 01/15] add joao CODEOWNERS (#6762) * add joao CODEOWNERS --- .github/CODEOWNERS | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..f9478d1369 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,2 @@ +beacon_node/network/ @jxs +beacon_node/lighthouse_network/ @jxs From 57141d8b4bab2d8281a01629de04d9a935f00d1c Mon Sep 17 00:00:00 2001 From: Ekaterina Riazantseva Date: Wed, 8 Jan 2025 01:39:53 +0100 Subject: [PATCH 02/15] Add 'beacon_' prefix to PeerDAS metrics names (#6537) * Add 'beacon_' prefix to PeerDAS metrics names * Merge remote-tracking branch 'origin/unstable' into peerdas-metrics * Merge 'origin/unstable' into peerdas-metrics * Merge remote-tracking branch 'origin/unstable' into peerdas-metrics * Add 'beacon_' prefix to 'kzg_data_column' metrics --- beacon_node/beacon_chain/src/metrics.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index c6aa9fbcac..8d71e895c9 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1656,7 +1656,7 @@ pub static BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: LazyLock> }); pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock> = LazyLock::new(|| { try_create_histogram_vec_with_buckets( - "data_column_sidecar_computation_seconds", + "beacon_data_column_sidecar_computation_seconds", "Time taken to compute data column sidecar, including cells, proofs and inclusion proof", Ok(vec![0.1, 0.15, 0.25, 0.35, 0.5, 0.7, 1.0, 2.5, 5.0, 10.0]), &["blob_count"], ) }); pub static DATA_COLUMN_SIDECAR_INCLUSION_PROOF_VERIFICATION: LazyLock> = 
LazyLock::new(|| { try_create_histogram( - "data_column_sidecar_inclusion_proof_verification_seconds", + "beacon_data_column_sidecar_inclusion_proof_verification_seconds", "Time taken to verify data_column sidecar inclusion proof", ) }); @@ -1847,7 +1847,7 @@ pub static KZG_VERIFICATION_BATCH_TIMES: LazyLock> = LazyLock: pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock> = LazyLock::new(|| { try_create_histogram_with_buckets( - "kzg_verification_data_column_single_seconds", + "beacon_kzg_verification_data_column_single_seconds", "Runtime of single data column kzg verification", Ok(vec![ 0.0005, 0.001, 0.0015, 0.002, 0.003, 0.004, 0.005, 0.007, 0.01, 0.02, 0.05, @@ -1857,7 +1857,7 @@ pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock pub static KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES: LazyLock> = LazyLock::new(|| { try_create_histogram_with_buckets( - "kzg_verification_data_column_batch_seconds", + "beacon_kzg_verification_data_column_batch_seconds", "Runtime of batched data column kzg verification", Ok(vec![ 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.015, 0.02, 0.03, 0.05, 0.07, @@ -1910,14 +1910,14 @@ pub static DATA_AVAILABILITY_OVERFLOW_STORE_CACHE_SIZE: LazyLock> = LazyLock::new(|| { try_create_histogram( - "data_availability_reconstruction_time_seconds", + "beacon_data_availability_reconstruction_time_seconds", "Time taken to reconstruct columns", ) }); pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "data_availability_reconstructed_columns_total", + "beacon_data_availability_reconstructed_columns_total", "Total count of reconstructed columns", ) }); @@ -1925,7 +1925,7 @@ pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock> pub static KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "kzg_data_column_reconstruction_attempts", + "beacon_kzg_data_column_reconstruction_attempts", "Count of times data column reconstruction 
has been attempted", ) }); @@ -1933,7 +1933,7 @@ pub static KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS: LazyLock> pub static KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "kzg_data_column_reconstruction_failures", + "beacon_kzg_data_column_reconstruction_failures", "Count of times data column reconstruction has failed", ) }); @@ -1941,7 +1941,7 @@ pub static KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES: LazyLock> pub static KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter_vec( - "kzg_data_column_reconstruction_incomplete_total", + "beacon_kzg_data_column_reconstruction_incomplete_total", "Count of times data column reconstruction attempts did not result in an import", &["reason"], ) From 7ec748a108bdef9fbe02ae9edb2f49f2682a555f Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 8 Jan 2025 14:12:34 +1100 Subject: [PATCH 03/15] Implement `getBlobSidecars` support for PeerDAS (#6755) * Implement getBlobSidecars endpoint for PeerDAS. * Merge branch 'unstable' into peerdas-get-blob-sidecars * Fix incorrect logging. * Replace `and_then` usage. 
--- beacon_node/beacon_chain/src/kzg_utils.rs | 143 ++++++++++++++++++++-- beacon_node/http_api/src/block_id.rs | 84 ++++++++++--- consensus/types/src/blob_sidecar.rs | 6 +- 3 files changed, 202 insertions(+), 31 deletions(-) diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 1680c0298d..bd47e82215 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -7,8 +7,9 @@ use std::sync::Arc; use types::beacon_block_body::KzgCommitments; use types::data_column_sidecar::{Cell, DataColumn, DataColumnSidecarError}; use types::{ - Blob, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, Hash256, - KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, + Blob, BlobSidecar, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecar, + DataColumnSidecarList, EthSpec, Hash256, KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, + SignedBeaconBlockHeader, SignedBlindedBeaconBlock, }; /// Converts a blob ssz List object to an array to be used with the kzg @@ -243,6 +244,83 @@ fn build_data_column_sidecars( Ok(sidecars) } +/// Reconstruct blobs from a subset of data column sidecars (requires at least 50%). +/// +/// If `blob_indices_opt` is `None`, this function attempts to reconstruct all blobs associated +/// with the block. +pub fn reconstruct_blobs( + kzg: &Kzg, + data_columns: &[Arc>], + blob_indices_opt: Option>, + signed_block: &SignedBlindedBeaconBlock, +) -> Result, String> { + // The data columns are from the database, so we assume their correctness. 
+ let first_data_column = data_columns + .first() + .ok_or("data_columns should have at least one element".to_string())?; + + let blob_indices: Vec = match blob_indices_opt { + Some(indices) => indices.into_iter().map(|i| i as usize).collect(), + None => { + let num_of_blobs = first_data_column.kzg_commitments.len(); + (0..num_of_blobs).collect() + } + }; + + let blob_sidecars = blob_indices + .into_par_iter() + .map(|row_index| { + let mut cells: Vec = vec![]; + let mut cell_ids: Vec = vec![]; + for data_column in data_columns { + let cell = data_column + .column + .get(row_index) + .ok_or(format!("Missing data column at row index {row_index}")) + .and_then(|cell| { + ssz_cell_to_crypto_cell::(cell).map_err(|e| format!("{e:?}")) + })?; + + cells.push(cell); + cell_ids.push(data_column.index); + } + + let (cells, _kzg_proofs) = kzg + .recover_cells_and_compute_kzg_proofs(&cell_ids, &cells) + .map_err(|e| format!("Failed to recover cells and compute KZG proofs: {e:?}"))?; + + let num_cells_original_blob = cells.len() / 2; + let blob_bytes = cells + .into_iter() + .take(num_cells_original_blob) + .flat_map(|cell| cell.into_iter()) + .collect(); + + let blob = Blob::::new(blob_bytes).map_err(|e| format!("{e:?}"))?; + let kzg_commitment = first_data_column + .kzg_commitments + .get(row_index) + .ok_or(format!("Missing KZG commitment for blob {row_index}"))?; + let kzg_proof = compute_blob_kzg_proof::(kzg, &blob, *kzg_commitment) + .map_err(|e| format!("{e:?}"))?; + + BlobSidecar::::new_with_existing_proof( + row_index, + blob, + signed_block, + first_data_column.signed_block_header.clone(), + &first_data_column.kzg_commitments_inclusion_proof, + kzg_proof, + ) + .map(Arc::new) + .map_err(|e| format!("{e:?}")) + }) + .collect::, _>>()? + .into(); + + Ok(blob_sidecars) +} + /// Reconstruct all data columns from a subset of data column sidecars (requires at least 50%). 
pub fn reconstruct_data_columns( kzg: &Kzg, @@ -265,7 +343,7 @@ pub fn reconstruct_data_columns( for data_column in data_columns { let cell = data_column.column.get(row_index).ok_or( KzgError::InconsistentArrayLength(format!( - "Missing data column at index {row_index}" + "Missing data column at row index {row_index}" )), )?; @@ -289,12 +367,16 @@ pub fn reconstruct_data_columns( #[cfg(test)] mod test { - use crate::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; + use crate::kzg_utils::{ + blobs_to_data_column_sidecars, reconstruct_blobs, reconstruct_data_columns, + }; use bls::Signature; + use eth2::types::BlobsBundle; + use execution_layer::test_utils::generate_blobs; use kzg::{trusted_setup::get_trusted_setup, Kzg, KzgCommitment, TrustedSetup}; use types::{ - beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, - ChainSpec, EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, + beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, BlobsList, ChainSpec, + EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, }; type E = MainnetEthSpec; @@ -308,6 +390,7 @@ mod test { test_build_data_columns_empty(&kzg, &spec); test_build_data_columns(&kzg, &spec); test_reconstruct_data_columns(&kzg, &spec); + test_reconstruct_blobs_from_data_columns(&kzg, &spec); } #[track_caller] @@ -379,6 +462,36 @@ mod test { } } + #[track_caller] + fn test_reconstruct_blobs_from_data_columns(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 6; + let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let blob_refs = blobs.iter().collect::>(); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); + + // Now reconstruct + let signed_blinded_block = signed_block.into(); + let blob_indices = vec![3, 4, 5]; + let reconstructed_blobs = reconstruct_blobs( + kzg, + &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], + 
Some(blob_indices.clone()), + &signed_blinded_block, + ) + .unwrap(); + + for i in blob_indices { + let reconstructed_blob = &reconstructed_blobs + .iter() + .find(|sidecar| sidecar.index == i) + .map(|sidecar| sidecar.blob.clone()) + .expect("reconstructed blob should exist"); + let original_blob = blobs.get(i as usize).unwrap(); + assert_eq!(reconstructed_blob, original_blob, "{i}"); + } + } + fn get_kzg() -> Kzg { let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) @@ -397,12 +510,20 @@ mod test { KzgCommitments::::new(vec![KzgCommitment::empty_for_testing(); num_of_blobs]) .unwrap(); - let signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); + let mut signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); + + let (blobs_bundle, _) = generate_blobs::(num_of_blobs).unwrap(); + let BlobsBundle { + blobs, + commitments, + proofs: _, + } = blobs_bundle; - let blobs = (0..num_of_blobs) - .map(|_| Blob::::default()) - .collect::>() - .into(); + *signed_block + .message_mut() + .body_mut() + .blob_kzg_commitments_mut() + .unwrap() = commitments; (signed_block, blobs) } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index dba8eb1ef3..b9e4883318 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,4 +1,5 @@ use crate::{state_id::checkpoint_slot_and_execution_optimistic, ExecutionOptimistic}; +use beacon_chain::kzg_utils::reconstruct_blobs; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlobIndicesQuery; use eth2::types::BlockId as CoreBlockId; @@ -9,6 +10,7 @@ use types::{ BlobSidecarList, EthSpec, FixedBytesExtended, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, }; +use warp::Rejection; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a 
given /// `BlockId`. @@ -261,7 +263,7 @@ impl BlockId { #[allow(clippy::type_complexity)] pub fn get_blinded_block_and_blob_list_filtered( &self, - indices: BlobIndicesQuery, + query: BlobIndicesQuery, chain: &BeaconChain, ) -> Result< ( @@ -286,20 +288,32 @@ impl BlockId { // Return the `BlobSidecarList` identified by `self`. let blob_sidecar_list = if !blob_kzg_commitments.is_empty() { - chain - .store - .get_blobs(&root) - .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "no blobs stored for block {root}" - )) - })? + if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + Self::get_blobs_from_data_columns(chain, root, query.indices, &block)? + } else { + Self::get_blobs(chain, root, query.indices)? + } } else { BlobSidecarList::default() }; - let blob_sidecar_list_filtered = match indices.indices { + Ok((block, blob_sidecar_list, execution_optimistic, finalized)) + } + + fn get_blobs( + chain: &BeaconChain, + root: Hash256, + indices: Option>, + ) -> Result, Rejection> { + let blob_sidecar_list = chain + .store + .get_blobs(&root) + .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? 
+ .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("no blobs stored for block {root}")) + })?; + + let blob_sidecar_list_filtered = match indices { Some(vec) => { let list = blob_sidecar_list .into_iter() @@ -310,12 +324,48 @@ impl BlockId { } None => blob_sidecar_list, }; - Ok(( - block, - blob_sidecar_list_filtered, - execution_optimistic, - finalized, - )) + + Ok(blob_sidecar_list_filtered) + } + + fn get_blobs_from_data_columns( + chain: &BeaconChain, + root: Hash256, + blob_indices: Option>, + block: &SignedBlindedBeaconBlock<::EthSpec>, + ) -> Result, Rejection> { + let column_indices = chain.store.get_data_column_keys(root).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error fetching data columns keys: {e:?}" + )) + })?; + + let num_found_column_keys = column_indices.len(); + let num_required_columns = chain.spec.number_of_columns / 2; + let is_blob_available = num_found_column_keys >= num_required_columns; + + if is_blob_available { + let data_columns = column_indices + .into_iter() + .filter_map( + |column_index| match chain.get_data_column(&root, &column_index) { + Ok(Some(data_column)) => Some(Ok(data_column)), + Ok(None) => None, + Err(e) => Some(Err(warp_utils::reject::beacon_chain_error(e))), + }, + ) + .collect::, _>>()?; + + reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error reconstructing data columns: {e:?}" + )) + }) + } else { + Err(warp_utils::reject::custom_server_error( + format!("Insufficient data columns to reconstruct blobs: required {num_required_columns}, but only {num_found_column_keys} were found.") + )) + } } } diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 5a330388cc..302aa2a4c1 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,9 +1,9 @@ use crate::test_utils::TestRandom; -use crate::ForkName; use crate::{ 
beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, Epoch, EthSpec, FixedVector, Hash256, SignedBeaconBlockHeader, Slot, VariableList, }; +use crate::{AbstractExecPayload, ForkName}; use crate::{ForkVersionDeserialize, KzgProofs, SignedBeaconBlock}; use bls::Signature; use derivative::Derivative; @@ -150,10 +150,10 @@ impl BlobSidecar { }) } - pub fn new_with_existing_proof( + pub fn new_with_existing_proof>( index: usize, blob: Blob, - signed_block: &SignedBeaconBlock, + signed_block: &SignedBeaconBlock, signed_block_header: SignedBeaconBlockHeader, kzg_commitments_inclusion_proof: &[Hash256], kzg_proof: KzgProof, From 80cfbea7fe4c78d90638b256b0cb7fc19652b31f Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 8 Jan 2025 14:12:37 +1100 Subject: [PATCH 04/15] Fix incorrect data column metric name (#6761) * Fix incorrect data column metric name. --- beacon_node/beacon_chain/src/metrics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 8d71e895c9..ae3add7f03 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1693,7 +1693,7 @@ pub static DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "beacon_blobs_column_sidecar_processing_successes_total", + "beacon_data_column_sidecar_processing_successes_total", "Number of data column sidecars verified for gossip", ) }); From 87b72dec21759acfbc749220be3aee11ac91cdf3 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 8 Jan 2025 14:12:39 +1100 Subject: [PATCH 05/15] Fix incorrect VC default HTTP token path when the `--datadir` flag is present (#6748) * Fix incorrect default http token path when datadir flag is present. 
--- lighthouse/tests/validator_client.rs | 15 ++++++++++++++- validator_client/http_api/src/lib.rs | 1 + validator_client/src/config.rs | 9 +++++---- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index c5b303e4d1..1945399c86 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -345,7 +345,7 @@ fn http_store_keystore_passwords_in_secrets_dir_present() { } #[test] -fn http_token_path_flag() { +fn http_token_path_flag_present() { let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() .flag("http", None) @@ -359,6 +359,19 @@ fn http_token_path_flag() { }); } +#[test] +fn http_token_path_default() { + CommandLineTest::new() + .flag("http", None) + .run() + .with_config(|config| { + assert_eq!( + config.http_api.http_token_path, + config.validator_dir.join("api-token.txt") + ); + }); +} + // Tests for Metrics flags. #[test] fn metrics_flag() { diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index f3dab3780c..73ebe717af 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -106,6 +106,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { + // This value is always overridden when building config from CLI. 
let http_token_path = dirs::home_dir() .unwrap_or_else(|| PathBuf::from(".")) .join(DEFAULT_ROOT_DIR) diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 0fecb5202d..bb72ef81c8 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -314,10 +314,11 @@ impl Config { config.http_api.store_passwords_in_secrets_dir = true; } - if cli_args.get_one::("http-token-path").is_some() { - config.http_api.http_token_path = parse_required(cli_args, "http-token-path") - // For backward compatibility, default to the path under the validator dir if not provided. - .unwrap_or_else(|_| config.validator_dir.join(PK_FILENAME)); + if let Some(http_token_path) = cli_args.get_one::("http-token-path") { + config.http_api.http_token_path = PathBuf::from(http_token_path); + } else { + // For backward compatibility, default to the path under the validator dir if not provided. + config.http_api.http_token_path = config.validator_dir.join(PK_FILENAME); } /* From 1f6850fae2807c1d3f0e281524e0b1b9ab230e67 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 10 Jan 2025 06:43:29 +0530 Subject: [PATCH 06/15] Rust 1.84 lints (#6781) * Fix few lints * Fix remaining lints * Use fully qualified syntax --- beacon_node/beacon_chain/src/beacon_chain.rs | 10 +++++----- beacon_node/beacon_chain/src/canonical_head.rs | 6 +----- .../beacon_chain/src/data_availability_checker.rs | 6 +++--- .../overflow_lru_cache.rs | 9 +++------ .../beacon_chain/src/early_attester_cache.rs | 2 +- beacon_node/beacon_chain/src/eth1_chain.rs | 2 +- beacon_node/beacon_chain/src/execution_payload.rs | 8 ++++---- .../beacon_chain/src/graffiti_calculator.rs | 7 ++----- .../beacon_chain/src/observed_aggregates.rs | 2 +- beacon_node/beacon_chain/src/observed_attesters.rs | 4 ++-- .../beacon_chain/src/observed_data_sidecars.rs | 2 +- beacon_node/beacon_chain/src/shuffling_cache.rs | 2 +- beacon_node/beacon_processor/src/lib.rs | 2 +- beacon_node/client/src/builder.rs | 2 +- 
beacon_node/client/src/notifier.rs | 8 +++----- beacon_node/execution_layer/src/engine_api/http.rs | 10 ++++------ beacon_node/execution_layer/src/lib.rs | 2 +- beacon_node/execution_layer/src/payload_status.rs | 2 +- .../src/test_utils/mock_execution_layer.rs | 2 +- beacon_node/genesis/src/eth1_genesis_service.rs | 2 +- beacon_node/http_api/src/lib.rs | 4 ++-- beacon_node/http_api/src/validator.rs | 2 +- beacon_node/http_api/tests/interactive_tests.rs | 2 +- beacon_node/http_api/tests/tests.rs | 2 +- .../lighthouse_network/gossipsub/src/backoff.rs | 2 +- .../lighthouse_network/gossipsub/src/behaviour.rs | 3 +-- beacon_node/lighthouse_network/src/config.rs | 6 +++--- .../src/discovery/subnet_predicate.rs | 4 ++-- .../lighthouse_network/src/peer_manager/peerdb.rs | 2 +- .../src/peer_manager/peerdb/peer_info.rs | 4 ++-- .../src/network_beacon_processor/gossip_methods.rs | 2 +- beacon_node/operation_pool/src/lib.rs | 14 +++++--------- beacon_node/store/src/forwards_iter.rs | 4 ++-- beacon_node/store/src/reconstruct.rs | 2 +- beacon_node/store/src/state_cache.rs | 8 ++------ common/account_utils/src/validator_definitions.rs | 2 +- common/logging/src/lib.rs | 2 +- consensus/proto_array/src/proto_array.rs | 10 +++++----- consensus/state_processing/src/genesis.rs | 12 ++++++------ .../per_block_processing/altair/sync_committee.rs | 2 +- .../epoch_processing_summary.rs | 12 ++++++------ consensus/types/src/beacon_block_body.rs | 2 +- consensus/types/src/beacon_state.rs | 2 +- .../src/beacon_state/progressive_balances_cache.rs | 2 +- consensus/types/src/chain_spec.rs | 10 ++++------ consensus/types/src/deposit_tree_snapshot.rs | 3 +-- consensus/types/src/graffiti.rs | 2 +- lcli/src/transition_blocks.rs | 4 ++-- slasher/src/database.rs | 2 +- testing/ef_tests/src/cases/fork_choice.rs | 2 +- testing/ef_tests/src/cases/operations.rs | 4 ++-- testing/ef_tests/src/decode.rs | 2 +- validator_client/doppelganger_service/src/lib.rs | 2 +- 
.../slashing_protection/src/slashing_database.rs | 4 +--- .../validator_services/src/preparation_service.rs | 2 +- validator_client/validator_services/src/sync.rs | 2 +- validator_manager/src/create_validators.rs | 4 ++-- validator_manager/src/delete_validators.rs | 2 +- validator_manager/src/import_validators.rs | 2 +- validator_manager/src/list_validators.rs | 2 +- validator_manager/src/move_validators.rs | 2 +- 61 files changed, 110 insertions(+), 138 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 80766d57b3..7bbb9ff74d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -573,7 +573,7 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()); let is_canonical = self .block_root_at_slot(block_slot, WhenSlotSkipped::None)? - .map_or(false, |canonical_root| block_root == &canonical_root); + .is_some_and(|canonical_root| block_root == &canonical_root); Ok(block_slot <= finalized_slot && is_canonical) } @@ -604,7 +604,7 @@ impl BeaconChain { let slot_is_finalized = state_slot <= finalized_slot; let canonical = self .state_root_at_slot(state_slot)? - .map_or(false, |canonical_root| state_root == &canonical_root); + .is_some_and(|canonical_root| state_root == &canonical_root); Ok(FinalizationAndCanonicity { slot_is_finalized, canonical, @@ -5118,9 +5118,9 @@ impl BeaconChain { .start_of(slot) .unwrap_or_else(|| Duration::from_secs(0)), ); - block_delays.observed.map_or(false, |delay| { - delay >= self.slot_clock.unagg_attestation_production_delay() - }) + block_delays + .observed + .is_some_and(|delay| delay >= self.slot_clock.unagg_attestation_production_delay()) } /// Produce a block for some `slot` upon the given `state`. 
diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 4f92f5ec8f..4e21372efb 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -1254,11 +1254,7 @@ pub fn find_reorg_slot( ($state: ident, $block_root: ident) => { std::iter::once(Ok(($state.slot(), $block_root))) .chain($state.rev_iter_block_roots(spec)) - .skip_while(|result| { - result - .as_ref() - .map_or(false, |(slot, _)| *slot > lowest_slot) - }) + .skip_while(|result| result.as_ref().is_ok_and(|(slot, _)| *slot > lowest_slot)) }; } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 72806a74d2..f6002ea0ac 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -519,13 +519,13 @@ impl DataAvailabilityChecker { /// Returns true if the given epoch lies within the da boundary and false otherwise. pub fn da_check_required_for_epoch(&self, block_epoch: Epoch) -> bool { self.data_availability_boundary() - .map_or(false, |da_epoch| block_epoch >= da_epoch) + .is_some_and(|da_epoch| block_epoch >= da_epoch) } /// Returns `true` if the current epoch is greater than or equal to the `Deneb` epoch. 
pub fn is_deneb(&self) -> bool { - self.slot_clock.now().map_or(false, |slot| { - self.spec.deneb_fork_epoch.map_or(false, |deneb_epoch| { + self.slot_clock.now().is_some_and(|slot| { + self.spec.deneb_fork_epoch.is_some_and(|deneb_epoch| { let now_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); now_epoch >= deneb_epoch }) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 40361574af..5ce023038d 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -228,13 +228,10 @@ impl PendingComponents { ); let all_blobs_received = block_kzg_commitments_count_opt - .map_or(false, |num_expected_blobs| { - num_expected_blobs == num_received_blobs - }); + .is_some_and(|num_expected_blobs| num_expected_blobs == num_received_blobs); - let all_columns_received = expected_columns_opt.map_or(false, |num_expected_columns| { - num_expected_columns == num_received_columns - }); + let all_columns_received = expected_columns_opt + .is_some_and(|num_expected_columns| num_expected_columns == num_received_columns); all_blobs_received || all_columns_received } diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 606610a748..c94ea0e941 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -145,7 +145,7 @@ impl EarlyAttesterCache { self.item .read() .as_ref() - .map_or(false, |item| item.beacon_block_root == block_root) + .is_some_and(|item| item.beacon_block_root == block_root) } /// Returns the block, if `block_root` matches the cached item. 
diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index cb6e4c34f3..ad4f106517 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -153,7 +153,7 @@ fn get_sync_status( // Lighthouse is "cached and ready" when it has cached enough blocks to cover the start of the // current voting period. let lighthouse_is_cached_and_ready = - latest_cached_block_timestamp.map_or(false, |t| t >= voting_target_timestamp); + latest_cached_block_timestamp.is_some_and(|t| t >= voting_target_timestamp); Some(Eth1SyncStatusData { head_block_number, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 92d24c53c0..502a7918a1 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -127,9 +127,9 @@ impl PayloadNotifier { /// contains a few extra checks by running `partially_verify_execution_payload` first: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload -async fn notify_new_payload<'a, T: BeaconChainTypes>( +async fn notify_new_payload( chain: &Arc>, - block: BeaconBlockRef<'a, T::EthSpec>, + block: BeaconBlockRef<'_, T::EthSpec>, ) -> Result { let execution_layer = chain .execution_layer @@ -230,9 +230,9 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( /// Equivalent to the `validate_merge_block` function in the merge Fork Choice Changes: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/fork-choice.md#validate_merge_block -pub async fn validate_merge_block<'a, T: BeaconChainTypes>( +pub async fn validate_merge_block( chain: &Arc>, - block: BeaconBlockRef<'a, T::EthSpec>, + block: BeaconBlockRef<'_, T::EthSpec>, allow_optimistic_import: AllowOptimisticImport, ) -> Result<(), BlockError> { let spec = &chain.spec; diff --git 
a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index 4373164d62..8692d374ed 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -293,10 +293,7 @@ mod tests { .await .unwrap(); - let version_bytes = std::cmp::min( - lighthouse_version::VERSION.as_bytes().len(), - GRAFFITI_BYTES_LEN, - ); + let version_bytes = std::cmp::min(lighthouse_version::VERSION.len(), GRAFFITI_BYTES_LEN); // grab the slice of the graffiti that corresponds to the lighthouse version let graffiti_slice = &harness.chain.graffiti_calculator.get_graffiti(None).await.0[..version_bytes]; @@ -361,7 +358,7 @@ mod tests { let graffiti_str = "nice graffiti bro"; let mut graffiti_bytes = [0u8; GRAFFITI_BYTES_LEN]; - graffiti_bytes[..graffiti_str.as_bytes().len()].copy_from_slice(graffiti_str.as_bytes()); + graffiti_bytes[..graffiti_str.len()].copy_from_slice(graffiti_str.as_bytes()); let found_graffiti = harness .chain diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index dec012fb92..20ed36ace7 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -293,7 +293,7 @@ impl SlotHashSet { Ok(self .map .get(&root) - .map_or(false, |agg| agg.iter().any(|val| item.is_subset(val)))) + .is_some_and(|agg| agg.iter().any(|val| item.is_subset(val)))) } /// The number of observed items in `self`. 
diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index efb95f57a9..5bba8e4d8e 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -130,7 +130,7 @@ impl Item<()> for EpochBitfield { fn get(&self, validator_index: usize) -> Option<()> { self.bitfield .get(validator_index) - .map_or(false, |bit| *bit) + .is_some_and(|bit| *bit) .then_some(()) } } @@ -336,7 +336,7 @@ impl, E: EthSpec> AutoPruningEpochContainer { let exists = self .items .get(&epoch) - .map_or(false, |item| item.get(validator_index).is_some()); + .is_some_and(|item| item.get(validator_index).is_some()); Ok(exists) } diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs index 53f8c71f54..a9f4664064 100644 --- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -118,7 +118,7 @@ impl ObservedDataSidecars { slot: data_sidecar.slot(), proposer: data_sidecar.block_proposer_index(), }) - .map_or(false, |indices| indices.contains(&data_sidecar.index())); + .is_some_and(|indices| indices.contains(&data_sidecar.index())); Ok(is_known) } diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index da1d60db17..67ca72254b 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -253,7 +253,7 @@ impl BlockShufflingIds { } else if self .previous .as_ref() - .map_or(false, |id| id.shuffling_epoch == epoch) + .is_some_and(|id| id.shuffling_epoch == epoch) { self.previous.clone() } else if epoch == self.next.shuffling_epoch { diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 2a69b04c91..0edda2f95b 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ 
b/beacon_node/beacon_processor/src/lib.rs @@ -1022,7 +1022,7 @@ impl BeaconProcessor { let can_spawn = self.current_workers < self.config.max_workers; let drop_during_sync = work_event .as_ref() - .map_or(false, |event| event.drop_during_sync); + .is_some_and(|event| event.drop_during_sync); let idle_tx = idle_tx.clone(); let modified_queue_id = match work_event { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 7c6a253aca..24c6615822 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -910,7 +910,7 @@ where .forkchoice_update_parameters(); if params .head_hash - .map_or(false, |hash| hash != ExecutionBlockHash::zero()) + .is_some_and(|hash| hash != ExecutionBlockHash::zero()) { // Spawn a new task to update the EE without waiting for it to complete. let inner_chain = beacon_chain.clone(); diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index f686c2c650..e88803e94f 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -197,7 +197,7 @@ pub fn spawn_notifier( ); let speed = speedo.slots_per_second(); - let display_speed = speed.map_or(false, |speed| speed != 0.0); + let display_speed = speed.is_some_and(|speed| speed != 0.0); if display_speed { info!( @@ -233,7 +233,7 @@ pub fn spawn_notifier( ); let speed = speedo.slots_per_second(); - let display_speed = speed.map_or(false, |speed| speed != 0.0); + let display_speed = speed.is_some_and(|speed| speed != 0.0); if display_speed { info!( @@ -339,9 +339,7 @@ async fn bellatrix_readiness_logging( .message() .body() .execution_payload() - .map_or(false, |payload| { - payload.parent_hash() != ExecutionBlockHash::zero() - }); + .is_ok_and(|payload| payload.parent_hash() != ExecutionBlockHash::zero()); let has_execution_layer = beacon_chain.execution_layer.is_some(); diff --git a/beacon_node/execution_layer/src/engine_api/http.rs 
b/beacon_node/execution_layer/src/engine_api/http.rs index 33dc60d037..e2a81c072c 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -158,9 +158,7 @@ pub mod deposit_log { }; let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec) - .map_or(false, |(public_key, signature, msg)| { - signature.verify(&public_key, msg) - }); + .is_some_and(|(public_key, signature, msg)| signature.verify(&public_key, msg)); Ok(DepositLog { deposit_data, @@ -592,7 +590,7 @@ impl CachedResponse { /// returns `true` if the entry's age is >= age_limit pub fn older_than(&self, age_limit: Option) -> bool { - age_limit.map_or(false, |limit| self.age() >= limit) + age_limit.is_some_and(|limit| self.age() >= limit) } } @@ -720,9 +718,9 @@ impl HttpJsonRpc { .await } - pub async fn get_block_by_number<'a>( + pub async fn get_block_by_number( &self, - query: BlockByNumberQuery<'a>, + query: BlockByNumberQuery<'_>, ) -> Result, Error> { let params = json!([query, RETURN_FULL_TRANSACTION_OBJECTS]); diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index ae0dca9833..f3b12b21d1 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -2095,7 +2095,7 @@ fn verify_builder_bid( payload: header.timestamp(), expected: payload_attributes.timestamp(), })) - } else if block_number.map_or(false, |n| n != header.block_number()) { + } else if block_number.is_some_and(|n| n != header.block_number()) { Err(Box::new(InvalidBuilderPayload::BlockNumber { payload: header.block_number(), expected: block_number, diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs index 5405fd7009..cf0be8ed0d 100644 --- a/beacon_node/execution_layer/src/payload_status.rs +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -41,7 +41,7 @@ pub fn process_payload_status( 
PayloadStatusV1Status::Valid => { if response .latest_valid_hash - .map_or(false, |h| h == head_block_hash) + .is_some_and(|h| h == head_block_hash) { // The response is only valid if `latest_valid_hash` is not `null` and // equal to the provided `block_hash`. diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 48372a39be..dc90d91c0f 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -318,7 +318,7 @@ impl MockExecutionLayer { (self, block_hash) } - pub async fn with_terminal_block<'a, U, V>(self, func: U) -> Self + pub async fn with_terminal_block(self, func: U) -> Self where U: Fn(ChainSpec, ExecutionLayer, Option) -> V, V: Future, diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 3981833a5c..b5f4bd50ee 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -270,7 +270,7 @@ impl Eth1GenesisService { // Ignore any block that has already been processed or update the highest processed // block. - if highest_processed_block.map_or(false, |highest| highest >= block.number) { + if highest_processed_block.is_some_and(|highest| highest >= block.number) { continue; } else { self.stats diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 23d177da78..febdf69259 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1164,7 +1164,7 @@ pub fn serve( .map_err(warp_utils::reject::beacon_chain_error)? // Ignore any skip-slots immediately following the parent. .find(|res| { - res.as_ref().map_or(false, |(root, _)| *root != parent_root) + res.as_ref().is_ok_and(|(root, _)| *root != parent_root) }) .transpose() .map_err(warp_utils::reject::beacon_chain_error)? 
@@ -1249,7 +1249,7 @@ pub fn serve( let canonical = chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) .map_err(warp_utils::reject::beacon_chain_error)? - .map_or(false, |canonical| root == canonical); + .is_some_and(|canonical| root == canonical); let data = api_types::BlockHeaderData { root, diff --git a/beacon_node/http_api/src/validator.rs b/beacon_node/http_api/src/validator.rs index 7f11ddd8f4..baa41e33ed 100644 --- a/beacon_node/http_api/src/validator.rs +++ b/beacon_node/http_api/src/validator.rs @@ -14,7 +14,7 @@ pub fn pubkey_to_validator_index( state .validators() .get(index) - .map_or(false, |v| v.pubkey == *pubkey) + .is_some_and(|v| v.pubkey == *pubkey) }) .map(Result::Ok) .transpose() diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index e45dcf221c..8cfcf5d93e 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -161,7 +161,7 @@ impl ForkChoiceUpdates { update .payload_attributes .as_ref() - .map_or(false, |payload_attributes| { + .is_some_and(|payload_attributes| { payload_attributes.timestamp() == proposal_timestamp }) }) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 7007a14466..1efe44a613 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1278,7 +1278,7 @@ impl ApiTester { .chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) .unwrap() - .map_or(false, |canonical| block_root == canonical); + .is_some_and(|canonical| block_root == canonical); assert_eq!(result.canonical, canonical, "{:?}", block_id); assert_eq!(result.root, block_root, "{:?}", block_id); diff --git a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs index 537d2319c2..0d77e2cd0f 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs +++ 
b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs @@ -124,7 +124,7 @@ impl BackoffStorage { pub(crate) fn is_backoff_with_slack(&self, topic: &TopicHash, peer: &PeerId) -> bool { self.backoffs .get(topic) - .map_or(false, |m| m.contains_key(peer)) + .is_some_and(|m| m.contains_key(peer)) } pub(crate) fn get_backoff_time(&self, topic: &TopicHash, peer: &PeerId) -> Option { diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index c4e20e4397..6528e737a3 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -1770,8 +1770,7 @@ where // reject messages claiming to be from ourselves but not locally published let self_published = !self.config.allow_self_origin() && if let Some(own_id) = self.publish_config.get_own_id() { - own_id != propagation_source - && raw_message.source.as_ref().map_or(false, |s| s == own_id) + own_id != propagation_source && raw_message.source.as_ref() == Some(own_id) } else { self.published_message_ids.contains(msg_id) }; diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 21f3dc830f..8a93b1185d 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -166,7 +166,7 @@ impl Config { tcp_port, }); self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), disc_port); - self.discv5_config.table_filter = |enr| enr.ip4().as_ref().map_or(false, is_global_ipv4) + self.discv5_config.table_filter = |enr| enr.ip4().as_ref().is_some_and(is_global_ipv4) } /// Sets the listening address to use an ipv6 address. 
The discv5 ip_mode and table filter is @@ -187,7 +187,7 @@ impl Config { }); self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), disc_port); - self.discv5_config.table_filter = |enr| enr.ip6().as_ref().map_or(false, is_global_ipv6) + self.discv5_config.table_filter = |enr| enr.ip6().as_ref().is_some_and(is_global_ipv6) } /// Sets the listening address to use both an ipv4 and ipv6 address. The discv5 ip_mode and @@ -317,7 +317,7 @@ impl Default for Config { .filter_rate_limiter(filter_rate_limiter) .filter_max_bans_per_ip(Some(5)) .filter_max_nodes_per_ip(Some(10)) - .table_filter(|enr| enr.ip4().map_or(false, |ip| is_global_ipv4(&ip))) // Filter non-global IPs + .table_filter(|enr| enr.ip4().is_some_and(|ip| is_global_ipv4(&ip))) // Filter non-global IPs .ban_duration(Some(Duration::from_secs(3600))) .ping_interval(Duration::from_secs(300)) .build(); diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index 02ff0cc3ca..751f8dbb83 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -35,7 +35,7 @@ where .unwrap_or(false), Subnet::SyncCommittee(s) => sync_committee_bitfield .as_ref() - .map_or(false, |b| b.get(*s.deref() as usize).unwrap_or(false)), + .is_ok_and(|b| b.get(*s.deref() as usize).unwrap_or(false)), Subnet::DataColumn(s) => { if let Ok(custody_subnet_count) = enr.custody_subnet_count::(&spec) { DataColumnSubnetId::compute_custody_subnets::( @@ -43,7 +43,7 @@ where custody_subnet_count, &spec, ) - .map_or(false, |mut subnets| subnets.contains(s)) + .is_ok_and(|mut subnets| subnets.contains(s)) } else { false } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index d2effd4d03..22a3df1ae8 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ 
b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1305,7 +1305,7 @@ impl BannedPeersCount { pub fn ip_is_banned(&self, ip: &IpAddr) -> bool { self.banned_peers_per_ip .get(ip) - .map_or(false, |count| *count > BANNED_PEERS_PER_IP_THRESHOLD) + .is_some_and(|count| *count > BANNED_PEERS_PER_IP_THRESHOLD) } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index ee8c27f474..27c8463a55 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -99,7 +99,7 @@ impl PeerInfo { Subnet::SyncCommittee(id) => { return meta_data .syncnets() - .map_or(false, |s| s.get(**id as usize).unwrap_or(false)) + .is_ok_and(|s| s.get(**id as usize).unwrap_or(false)) } Subnet::DataColumn(column) => return self.custody_subnets.contains(column), } @@ -264,7 +264,7 @@ impl PeerInfo { /// Reports if this peer has some future validator duty in which case it is valuable to keep it. pub fn has_future_duty(&self) -> bool { - self.min_ttl.map_or(false, |i| i >= Instant::now()) + self.min_ttl.is_some_and(|i| i >= Instant::now()) } /// Returns score of the peer. 
diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index f3c48e42f0..6b5753e96a 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -3129,7 +3129,7 @@ impl NetworkBeaconProcessor { .chain .slot_clock .now() - .map_or(false, |current_slot| sync_message_slot == current_slot); + .is_some_and(|current_slot| sync_message_slot == current_slot); self.propagate_if_timely(is_timely, message_id, peer_id) } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index d01c73118c..d8183de752 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -186,7 +186,7 @@ impl OperationPool { self.sync_contributions.write().retain(|_, contributions| { // All the contributions in this bucket have the same data, so we only need to // check the first one. 
- contributions.first().map_or(false, |contribution| { + contributions.first().is_some_and(|contribution| { current_slot <= contribution.slot.saturating_add(Slot::new(1)) }) }); @@ -401,7 +401,7 @@ impl OperationPool { && state .validators() .get(slashing.as_inner().signed_header_1.message.proposer_index as usize) - .map_or(false, |validator| !validator.slashed) + .is_some_and(|validator| !validator.slashed) }, |slashing| slashing.as_inner().clone(), E::MaxProposerSlashings::to_usize(), @@ -484,7 +484,7 @@ impl OperationPool { validator.exit_epoch > head_state.finalized_checkpoint().epoch }, ) - .map_or(false, |indices| !indices.is_empty()); + .is_ok_and(|indices| !indices.is_empty()); signature_ok && slashing_ok }); @@ -583,9 +583,7 @@ impl OperationPool { address_change.signature_is_still_valid(&state.fork()) && state .get_validator(address_change.as_inner().message.validator_index as usize) - .map_or(false, |validator| { - !validator.has_execution_withdrawal_credential(spec) - }) + .is_ok_and(|validator| !validator.has_execution_withdrawal_credential(spec)) }, |address_change| address_change.as_inner().clone(), E::MaxBlsToExecutionChanges::to_usize(), @@ -609,9 +607,7 @@ impl OperationPool { address_change.signature_is_still_valid(&state.fork()) && state .get_validator(address_change.as_inner().message.validator_index as usize) - .map_or(false, |validator| { - !validator.has_eth1_withdrawal_credential(spec) - }) + .is_ok_and(|validator| !validator.has_eth1_withdrawal_credential(spec)) }, |address_change| address_change.as_inner().clone(), usize::MAX, diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 27769a310a..955bd33b30 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -265,7 +265,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> // `end_slot`. If it tries to continue further a `NoContinuationData` error will be // returned. 
let continuation_data = - if end_slot.map_or(false, |end_slot| end_slot < freezer_upper_bound) { + if end_slot.is_some_and(|end_slot| end_slot < freezer_upper_bound) { None } else { Some(Box::new(get_state()?)) @@ -306,7 +306,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> None => { // If the iterator has an end slot (inclusive) which has already been // covered by the (exclusive) frozen forwards iterator, then we're done! - if end_slot.map_or(false, |end_slot| iter.end_slot == end_slot + 1) { + if end_slot.is_some_and(|end_slot| iter.end_slot == end_slot + 1) { *self = Finished; return Ok(None); } diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 9bec83a35c..2a3b208aae 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -111,7 +111,7 @@ where self.store_cold_state(&state_root, &state, &mut io_batch)?; let batch_complete = - num_blocks.map_or(false, |n_blocks| slot == lower_limit_slot + n_blocks as u64); + num_blocks.is_some_and(|n_blocks| slot == lower_limit_slot + n_blocks as u64); let reconstruction_complete = slot + 1 == upper_limit_slot; // Commit the I/O batch if: diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 5c1faa7f2f..96e4de4639 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -77,9 +77,7 @@ impl StateCache { if self .finalized_state .as_ref() - .map_or(false, |finalized_state| { - state.slot() < finalized_state.state.slot() - }) + .is_some_and(|finalized_state| state.slot() < finalized_state.state.slot()) { return Err(Error::FinalizedStateDecreasingSlot); } @@ -127,9 +125,7 @@ impl StateCache { if self .finalized_state .as_ref() - .map_or(false, |finalized_state| { - finalized_state.state_root == state_root - }) + .is_some_and(|finalized_state| finalized_state.state_root == state_root) { return Ok(PutStateOutcome::Finalized); } diff --git 
a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index a4850fc1c6..24f6861daa 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -435,7 +435,7 @@ pub fn recursively_find_voting_keystores>( && dir_entry .file_name() .to_str() - .map_or(false, is_voting_keystore) + .is_some_and(is_voting_keystore) { matches.push(dir_entry.path()) } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 7fe7f79506..0ddd867c2f 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -204,7 +204,7 @@ impl TimeLatch { pub fn elapsed(&mut self) -> bool { let now = Instant::now(); - let is_elapsed = self.0.map_or(false, |elapse_time| now > elapse_time); + let is_elapsed = self.0.is_some_and(|elapse_time| now > elapse_time); if is_elapsed || self.0.is_none() { self.0 = Some(now + LOG_DEBOUNCE_INTERVAL); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 38ea141199..5d0bee4c85 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -468,7 +468,7 @@ impl ProtoArray { // 1. The `head_block_root` is a descendant of `latest_valid_ancestor_hash` // 2. The `latest_valid_ancestor_hash` is equal to or a descendant of the finalized block. let latest_valid_ancestor_is_descendant = - latest_valid_ancestor_root.map_or(false, |ancestor_root| { + latest_valid_ancestor_root.is_some_and(|ancestor_root| { self.is_descendant(ancestor_root, head_block_root) && self.is_finalized_checkpoint_or_descendant::(ancestor_root) }); @@ -505,13 +505,13 @@ impl ProtoArray { // head. 
if node .best_child - .map_or(false, |i| invalidated_indices.contains(&i)) + .is_some_and(|i| invalidated_indices.contains(&i)) { node.best_child = None } if node .best_descendant - .map_or(false, |i| invalidated_indices.contains(&i)) + .is_some_and(|i| invalidated_indices.contains(&i)) { node.best_descendant = None } @@ -999,7 +999,7 @@ impl ProtoArray { node.unrealized_finalized_checkpoint, node.unrealized_justified_checkpoint, ] { - if checkpoint.map_or(false, |cp| cp == self.finalized_checkpoint) { + if checkpoint.is_some_and(|cp| cp == self.finalized_checkpoint) { return true; } } @@ -1037,7 +1037,7 @@ impl ProtoArray { .find(|node| { node.execution_status .block_hash() - .map_or(false, |node_block_hash| node_block_hash == *block_hash) + .is_some_and(|node_block_hash| node_block_hash == *block_hash) }) .map(|node| node.root) } diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 00697def5d..ccff3d80c0 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -53,7 +53,7 @@ pub fn initialize_beacon_state_from_eth1( // https://github.com/ethereum/eth2.0-specs/pull/2323 if spec .altair_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { upgrade_to_altair(&mut state, spec)?; @@ -63,7 +63,7 @@ pub fn initialize_beacon_state_from_eth1( // Similarly, perform an upgrade to the merge if configured from genesis. 
if spec .bellatrix_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { // this will set state.latest_execution_payload_header = ExecutionPayloadHeaderBellatrix::default() upgrade_to_bellatrix(&mut state, spec)?; @@ -81,7 +81,7 @@ pub fn initialize_beacon_state_from_eth1( // Upgrade to capella if configured from genesis if spec .capella_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { upgrade_to_capella(&mut state, spec)?; @@ -98,7 +98,7 @@ pub fn initialize_beacon_state_from_eth1( // Upgrade to deneb if configured from genesis if spec .deneb_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { upgrade_to_deneb(&mut state, spec)?; @@ -115,7 +115,7 @@ pub fn initialize_beacon_state_from_eth1( // Upgrade to electra if configured from genesis. 
if spec .electra_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { let post = upgrade_state_to_electra(&mut state, Epoch::new(0), Epoch::new(0), spec)?; state = post; @@ -153,7 +153,7 @@ pub fn initialize_beacon_state_from_eth1( pub fn is_valid_genesis_state(state: &BeaconState, spec: &ChainSpec) -> bool { state .get_active_validator_indices(E::genesis_epoch(), spec) - .map_or(false, |active_validators| { + .is_ok_and(|active_validators| { state.genesis_time() >= spec.min_genesis_time && active_validators.len() as u64 >= spec.min_genesis_active_validator_count }) diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs index 210db4c9c1..08cfd9cba8 100644 --- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs +++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs @@ -38,7 +38,7 @@ pub fn process_sync_aggregate( )?; // If signature set is `None` then the signature is valid (infinity). - if signature_set.map_or(false, |signature| !signature.verify()) { + if signature_set.is_some_and(|signature| !signature.verify()) { return Err(SyncAggregateInvalid::SignatureInvalid.into()); } } diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs index 952ab3f649..5508b80807 100644 --- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs +++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs @@ -151,7 +151,7 @@ impl EpochProcessingSummary { match self { EpochProcessingSummary::Base { statuses, .. 
} => statuses .get(val_index) - .map_or(false, |s| s.is_active_in_current_epoch && !s.is_slashed), + .is_some_and(|s| s.is_active_in_current_epoch && !s.is_slashed), EpochProcessingSummary::Altair { participation, .. } => { participation.is_active_and_unslashed(val_index, participation.current_epoch) } @@ -176,7 +176,7 @@ impl EpochProcessingSummary { match self { EpochProcessingSummary::Base { statuses, .. } => Ok(statuses .get(val_index) - .map_or(false, |s| s.is_current_epoch_target_attester)), + .is_some_and(|s| s.is_current_epoch_target_attester)), EpochProcessingSummary::Altair { participation, .. } => participation .is_current_epoch_unslashed_participating_index( val_index, @@ -247,7 +247,7 @@ impl EpochProcessingSummary { match self { EpochProcessingSummary::Base { statuses, .. } => statuses .get(val_index) - .map_or(false, |s| s.is_active_in_previous_epoch && !s.is_slashed), + .is_some_and(|s| s.is_active_in_previous_epoch && !s.is_slashed), EpochProcessingSummary::Altair { participation, .. } => { participation.is_active_and_unslashed(val_index, participation.previous_epoch) } @@ -267,7 +267,7 @@ impl EpochProcessingSummary { match self { EpochProcessingSummary::Base { statuses, .. } => Ok(statuses .get(val_index) - .map_or(false, |s| s.is_previous_epoch_target_attester)), + .is_some_and(|s| s.is_previous_epoch_target_attester)), EpochProcessingSummary::Altair { participation, .. } => participation .is_previous_epoch_unslashed_participating_index( val_index, @@ -294,7 +294,7 @@ impl EpochProcessingSummary { match self { EpochProcessingSummary::Base { statuses, .. } => Ok(statuses .get(val_index) - .map_or(false, |s| s.is_previous_epoch_head_attester)), + .is_some_and(|s| s.is_previous_epoch_head_attester)), EpochProcessingSummary::Altair { participation, .. 
} => participation .is_previous_epoch_unslashed_participating_index(val_index, TIMELY_HEAD_FLAG_INDEX), } @@ -318,7 +318,7 @@ impl EpochProcessingSummary { match self { EpochProcessingSummary::Base { statuses, .. } => Ok(statuses .get(val_index) - .map_or(false, |s| s.is_previous_epoch_attester)), + .is_some_and(|s| s.is_previous_epoch_attester)), EpochProcessingSummary::Altair { participation, .. } => participation .is_previous_epoch_unslashed_participating_index( val_index, diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index b896dc4693..f7a701fed6 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -283,7 +283,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, /// Return `true` if this block body has a non-zero number of blobs. pub fn has_blobs(self) -> bool { self.blob_kzg_commitments() - .map_or(false, |blobs| !blobs.is_empty()) + .is_ok_and(|blobs| !blobs.is_empty()) } pub fn attestations_len(&self) -> usize { diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index ad4484b86a..05f28744fa 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -1856,7 +1856,7 @@ impl BeaconState { pub fn committee_cache_is_initialized(&self, relative_epoch: RelativeEpoch) -> bool { let i = Self::committee_cache_index(relative_epoch); - self.committee_cache_at_index(i).map_or(false, |cache| { + self.committee_cache_at_index(i).is_ok_and(|cache| { cache.is_initialized_at(relative_epoch.into_epoch(self.current_epoch())) }) } diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/beacon_state/progressive_balances_cache.rs index fd5e51313f..bc258ef68d 100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs @@ -145,7 +145,7 @@ impl 
ProgressiveBalancesCache { pub fn is_initialized_at(&self, epoch: Epoch) -> bool { self.inner .as_ref() - .map_or(false, |inner| inner.current_epoch == epoch) + .is_some_and(|inner| inner.current_epoch == epoch) } /// When a new target attestation has been processed, we update the cached diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 0b33a76ff1..9d3308cf23 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -420,16 +420,14 @@ impl ChainSpec { /// Returns true if the given epoch is greater than or equal to the `EIP7594_FORK_EPOCH`. pub fn is_peer_das_enabled_for_epoch(&self, block_epoch: Epoch) -> bool { - self.eip7594_fork_epoch.map_or(false, |eip7594_fork_epoch| { - block_epoch >= eip7594_fork_epoch - }) + self.eip7594_fork_epoch + .is_some_and(|eip7594_fork_epoch| block_epoch >= eip7594_fork_epoch) } /// Returns true if `EIP7594_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`. pub fn is_peer_das_scheduled(&self) -> bool { - self.eip7594_fork_epoch.map_or(false, |eip7594_fork_epoch| { - eip7594_fork_epoch != self.far_future_epoch - }) + self.eip7594_fork_epoch + .is_some_and(|eip7594_fork_epoch| eip7594_fork_epoch != self.far_future_epoch) } /// Returns a full `Fork` struct for a given epoch. 
diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs index df1064daba..2f9df8758b 100644 --- a/consensus/types/src/deposit_tree_snapshot.rs +++ b/consensus/types/src/deposit_tree_snapshot.rs @@ -72,8 +72,7 @@ impl DepositTreeSnapshot { Some(Hash256::from_slice(&deposit_root)) } pub fn is_valid(&self) -> bool { - self.calculate_root() - .map_or(false, |calculated| self.deposit_root == calculated) + self.calculate_root() == Some(self.deposit_root) } } diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs index 08f8573c6d..f781aacabd 100644 --- a/consensus/types/src/graffiti.rs +++ b/consensus/types/src/graffiti.rs @@ -57,7 +57,7 @@ impl FromStr for GraffitiString { type Err = String; fn from_str(s: &str) -> Result { - if s.as_bytes().len() > GRAFFITI_BYTES_LEN { + if s.len() > GRAFFITI_BYTES_LEN { return Err(format!( "Graffiti exceeds max length {}", GRAFFITI_BYTES_LEN diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs index 94d95a0d1c..ecfa04fc81 100644 --- a/lcli/src/transition_blocks.rs +++ b/lcli/src/transition_blocks.rs @@ -223,7 +223,7 @@ pub fn run( .update_tree_hash_cache() .map_err(|e| format!("Unable to build THC: {:?}", e))?; - if state_root_opt.map_or(false, |expected| expected != state_root) { + if state_root_opt.is_some_and(|expected| expected != state_root) { return Err(format!( "State root mismatch! Expected {}, computed {}", state_root_opt.unwrap(), @@ -331,7 +331,7 @@ fn do_transition( .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?; debug!("Initial tree hash: {:?}", t.elapsed()); - if state_root_opt.map_or(false, |expected| expected != state_root) { + if state_root_opt.is_some_and(|expected| expected != state_root) { return Err(format!( "State root mismatch! 
Expected {}, computed {}", state_root_opt.unwrap(), diff --git a/slasher/src/database.rs b/slasher/src/database.rs index 20b4a33771..e2b49dca29 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -406,7 +406,7 @@ impl SlasherDB { ) -> Result<(), Error> { // Don't update maximum if new target is less than or equal to previous. In the case of // no previous we *do* want to update. - if previous_max_target.map_or(false, |prev_max| max_target <= prev_max) { + if previous_max_target.is_some_and(|prev_max| max_target <= prev_max) { return Ok(()); } diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs index 427bcf5e9c..a1c74389a7 100644 --- a/testing/ef_tests/src/cases/fork_choice.rs +++ b/testing/ef_tests/src/cases/fork_choice.rs @@ -523,7 +523,7 @@ impl Tester { || Ok(()), ))? .map(|avail: AvailabilityProcessingStatus| avail.try_into()); - let success = blob_success && result.as_ref().map_or(false, |inner| inner.is_ok()); + let success = blob_success && result.as_ref().is_ok_and(|inner| inner.is_ok()); if success != valid { return Err(Error::DidntFail(format!( "block with root {} was valid={} whilst test expects valid={}. 
result: {:?}", diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index 54ca52447f..d8cade296b 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -322,7 +322,7 @@ impl Operation for BeaconBlockBody> { let valid = extra .execution_metadata .as_ref() - .map_or(false, |e| e.execution_valid); + .is_some_and(|e| e.execution_valid); if valid { process_execution_payload::>(state, self.to_ref(), spec) } else { @@ -377,7 +377,7 @@ impl Operation for BeaconBlockBody> { let valid = extra .execution_metadata .as_ref() - .map_or(false, |e| e.execution_valid); + .is_some_and(|e| e.execution_valid); if valid { process_execution_payload::>(state, self.to_ref(), spec) } else { diff --git a/testing/ef_tests/src/decode.rs b/testing/ef_tests/src/decode.rs index 757b9bf3c4..eb88ac6af1 100644 --- a/testing/ef_tests/src/decode.rs +++ b/testing/ef_tests/src/decode.rs @@ -28,7 +28,7 @@ pub fn log_file_access>(file_accessed: P) { writeln!(&mut file, "{:?}", file_accessed.as_ref()).expect("should write to file"); - file.unlock().expect("unable to unlock file"); + fs2::FileExt::unlock(&file).expect("unable to unlock file"); } pub fn yaml_decode(string: &str) -> Result { diff --git a/validator_client/doppelganger_service/src/lib.rs b/validator_client/doppelganger_service/src/lib.rs index 35228fe354..4a593c2700 100644 --- a/validator_client/doppelganger_service/src/lib.rs +++ b/validator_client/doppelganger_service/src/lib.rs @@ -162,7 +162,7 @@ impl DoppelgangerState { /// If the BN fails to respond to either of these requests, simply return an empty response. /// This behaviour is to help prevent spurious failures on the BN from needlessly preventing /// doppelganger progression. 
-async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>( +async fn beacon_node_liveness( beacon_nodes: Arc>, log: Logger, current_epoch: Epoch, diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index baaf930c68..71611339f9 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -1113,9 +1113,7 @@ fn max_or(opt_x: Option, y: T) -> T { /// /// If prev is `None` and `new` is `Some` then `true` is returned. fn monotonic(new: Option, prev: Option) -> bool { - new.map_or(false, |new_val| { - prev.map_or(true, |prev_val| new_val >= prev_val) - }) + new.is_some_and(|new_val| prev.map_or(true, |prev_val| new_val >= prev_val)) } /// The result of importing a single entry from an interchange file. diff --git a/validator_client/validator_services/src/preparation_service.rs b/validator_client/validator_services/src/preparation_service.rs index 480f4af2b3..fe6eab3a8a 100644 --- a/validator_client/validator_services/src/preparation_service.rs +++ b/validator_client/validator_services/src/preparation_service.rs @@ -258,7 +258,7 @@ impl PreparationService { .slot_clock .now() .map_or(E::genesis_epoch(), |slot| slot.epoch(E::slots_per_epoch())); - spec.bellatrix_fork_epoch.map_or(false, |fork_epoch| { + spec.bellatrix_fork_epoch.is_some_and(|fork_epoch| { current_epoch + PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS >= fork_epoch }) } diff --git a/validator_client/validator_services/src/sync.rs b/validator_client/validator_services/src/sync.rs index af501326f4..dd3e05088e 100644 --- a/validator_client/validator_services/src/sync.rs +++ b/validator_client/validator_services/src/sync.rs @@ -94,7 +94,7 @@ impl SyncDutiesMap { self.committees .read() .get(&committee_period) - .map_or(false, |committee_duties| { + .is_some_and(|committee_duties| { let validator_duties = committee_duties.validators.read(); 
validator_indices .iter() diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs index d4403b4613..b40fe61a82 100644 --- a/validator_manager/src/create_validators.rs +++ b/validator_manager/src/create_validators.rs @@ -286,7 +286,7 @@ struct ValidatorsAndDeposits { } impl ValidatorsAndDeposits { - async fn new<'a, E: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result { + async fn new(config: CreateConfig, spec: &ChainSpec) -> Result { let CreateConfig { // The output path is handled upstream. output_path: _, @@ -545,7 +545,7 @@ pub async fn cli_run( } } -async fn run<'a, E: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result<(), String> { +async fn run(config: CreateConfig, spec: &ChainSpec) -> Result<(), String> { let output_path = config.output_path.clone(); if !output_path.exists() { diff --git a/validator_manager/src/delete_validators.rs b/validator_manager/src/delete_validators.rs index a2d6c062fa..5ef647c5af 100644 --- a/validator_manager/src/delete_validators.rs +++ b/validator_manager/src/delete_validators.rs @@ -86,7 +86,7 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<() } } -async fn run<'a>(config: DeleteConfig) -> Result<(), String> { +async fn run(config: DeleteConfig) -> Result<(), String> { let DeleteConfig { vc_url, vc_token_path, diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs index 3cebc10bb3..63c7ca4596 100644 --- a/validator_manager/src/import_validators.rs +++ b/validator_manager/src/import_validators.rs @@ -209,7 +209,7 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<() } } -async fn run<'a>(config: ImportConfig) -> Result<(), String> { +async fn run(config: ImportConfig) -> Result<(), String> { let ImportConfig { validators_file_path, keystore_file_path, diff --git a/validator_manager/src/list_validators.rs b/validator_manager/src/list_validators.rs index 
e3deb0b21a..a0a1c5fb40 100644 --- a/validator_manager/src/list_validators.rs +++ b/validator_manager/src/list_validators.rs @@ -58,7 +58,7 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<() } } -async fn run<'a>(config: ListConfig) -> Result, String> { +async fn run(config: ListConfig) -> Result, String> { let ListConfig { vc_url, vc_token_path, diff --git a/validator_manager/src/move_validators.rs b/validator_manager/src/move_validators.rs index 4d0820f5a8..abac071673 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -277,7 +277,7 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<() } } -async fn run<'a>(config: MoveConfig) -> Result<(), String> { +async fn run(config: MoveConfig) -> Result<(), String> { let MoveConfig { src_vc_url, src_vc_token_path, From a244aa3a6971572c65dd1c68c726547a1d38c033 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Fri, 10 Jan 2025 08:13:32 +0700 Subject: [PATCH 07/15] Add libssl install to udeps task (#6777) * Add libssl install to udeps task * Use HTTPS --- .github/workflows/test-suite.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 45f3b757e7..0ee9dbb622 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -392,6 +392,10 @@ jobs: cache: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Fetch libssl1.1 + run: wget https://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb + - name: Install libssl1.1 + run: sudo dpkg -i libssl1.1_1.1.1f-1ubuntu2_amd64.deb - name: Create Cargo config dir run: mkdir -p .cargo - name: Install custom Cargo config From 722573f7ed102bda56f2778984989151980a50ff Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Fri, 10 Jan 2025 13:25:20 +0800 Subject: [PATCH 08/15] Use 
oldest_block_slot to break off pruning payloads (#6745) * Use oldest_block_slot to break of pruning payloads * Update beacon_node/store/src/hot_cold_store.rs Co-authored-by: Michael Sproul * Merge remote-tracking branch 'origin/unstable' into anchor_slot_pruning --- beacon_node/store/src/hot_cold_store.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index da3e6d4ebc..c6148e5314 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -2629,7 +2629,7 @@ impl, Cold: ItemStore> HotColdDB "Pruning finalized payloads"; "info" => "you may notice degraded I/O performance while this runs" ); - let anchor_slot = self.get_anchor_info().anchor_slot; + let anchor_info = self.get_anchor_info(); let mut ops = vec![]; let mut last_pruned_block_root = None; @@ -2670,10 +2670,10 @@ impl, Cold: ItemStore> HotColdDB ops.push(StoreOp::DeleteExecutionPayload(block_root)); } - if slot == anchor_slot { + if slot <= anchor_info.oldest_block_slot { info!( self.log, - "Payload pruning reached anchor state"; + "Payload pruning reached anchor oldest block slot"; "slot" => slot ); break; From ecdf2d891fb78d84e500ca8cf3a9deb9706263cf Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 10 Jan 2025 09:25:23 +0400 Subject: [PATCH 09/15] Add Fulu boilerplate (#6695) * Add Fulu boilerplate * Add more boilerplate * Change fulu_time to osaka_time * Merge branch 'unstable' into fulu-boilerplate * Fix tests * Merge branch 'unstable' into fulu-boilerplate * More test fixes * Apply suggestions * Remove `get_payload` boilerplate * Add lightclient fulu types and fix beacon-chain-tests * Disable Fulu in ef-tests * Reduce boilerplate for future forks * Small fixes * One more fix * Apply suggestions * Merge branch 'unstable' into fulu-boilerplate * Fix lints --- Makefile | 2 +- .../beacon_chain/src/attestation_rewards.rs | 11 +- 
.../beacon_chain/src/beacon_block_streamer.rs | 9 +- beacon_node/beacon_chain/src/beacon_chain.rs | 72 ++++++-- .../beacon_chain/src/execution_payload.rs | 20 +-- .../beacon_chain/src/fulu_readiness.rs | 114 ++++++++++++ beacon_node/beacon_chain/src/lib.rs | 1 + beacon_node/beacon_chain/src/test_utils.rs | 52 ++++-- .../beacon_chain/tests/block_verification.rs | 9 + beacon_node/beacon_chain/tests/store_tests.rs | 1 + .../beacon_chain/tests/validator_monitor.rs | 1 + beacon_node/client/src/notifier.rs | 58 ++++++ beacon_node/execution_layer/src/engine_api.rs | 60 ++++++- .../execution_layer/src/engine_api/http.rs | 90 ++++++++-- .../src/engine_api/json_structures.rs | 87 ++++++++- .../src/engine_api/new_payload_request.rs | 29 ++- beacon_node/execution_layer/src/lib.rs | 13 +- .../test_utils/execution_block_generator.rs | 49 ++++- .../src/test_utils/handle_rpc.rs | 99 ++++++++++- .../src/test_utils/mock_builder.rs | 54 +++++- .../src/test_utils/mock_execution_layer.rs | 3 + .../execution_layer/src/test_utils/mod.rs | 9 + .../tests/broadcast_validation_tests.rs | 36 ++-- beacon_node/http_api/tests/tests.rs | 4 +- .../lighthouse_network/src/rpc/codec.rs | 106 ++++++++--- .../lighthouse_network/src/rpc/protocol.rs | 38 +++- .../lighthouse_network/src/types/pubsub.rs | 9 +- .../lighthouse_network/src/types/topics.rs | 1 + .../lighthouse_network/tests/common.rs | 3 + beacon_node/network/src/sync/tests/lookups.rs | 2 +- beacon_node/operation_pool/src/lib.rs | 71 +++----- beacon_node/src/lib.rs | 1 + .../store/src/impls/execution_payload.rs | 23 ++- beacon_node/store/src/partial_beacon_state.rs | 66 +++++-- common/eth2/src/types.rs | 33 ++-- .../chiado/config.yaml | 9 +- .../gnosis/config.yaml | 9 +- .../holesky/config.yaml | 7 +- .../mainnet/config.yaml | 20 +-- .../sepolia/config.yaml | 2 +- consensus/fork_choice/src/fork_choice.rs | 14 +- consensus/fork_choice/tests/tests.rs | 2 +- .../common/get_attestation_participation.rs | 21 +-- .../src/common/slash_validator.rs | 
13 +- consensus/state_processing/src/genesis.rs | 20 ++- .../src/per_block_processing.rs | 119 +++++++------ .../process_operations.rs | 39 ++-- .../per_block_processing/signature_sets.rs | 22 ++- .../verify_attestation.rs | 23 +-- .../src/per_epoch_processing.rs | 11 +- .../src/per_slot_processing.rs | 7 +- consensus/state_processing/src/upgrade.rs | 2 + .../state_processing/src/upgrade/fulu.rs | 94 ++++++++++ consensus/types/presets/gnosis/fulu.yaml | 3 + consensus/types/presets/mainnet/fulu.yaml | 3 + consensus/types/presets/minimal/fulu.yaml | 3 + consensus/types/src/beacon_block.rs | 163 ++++++++++++++++- consensus/types/src/beacon_block_body.rs | 124 ++++++++++++- consensus/types/src/beacon_state.rs | 167 ++++++++++++++---- .../progressive_balances_cache.rs | 9 +- consensus/types/src/builder_bid.rs | 12 +- consensus/types/src/chain_spec.rs | 74 ++++++-- consensus/types/src/config_and_preset.rs | 37 ++-- consensus/types/src/execution_payload.rs | 24 ++- .../types/src/execution_payload_header.rs | 91 ++++++++-- consensus/types/src/fork_context.rs | 7 + consensus/types/src/fork_name.rs | 32 +++- consensus/types/src/lib.rs | 36 ++-- consensus/types/src/light_client_bootstrap.rs | 27 ++- .../types/src/light_client_finality_update.rs | 33 +++- consensus/types/src/light_client_header.rs | 60 ++++++- .../src/light_client_optimistic_update.rs | 23 ++- consensus/types/src/light_client_update.rs | 47 ++++- consensus/types/src/payload.rs | 44 ++++- consensus/types/src/preset.rs | 18 ++ consensus/types/src/signed_beacon_block.rs | 75 +++++++- lcli/src/mock_el.rs | 2 + testing/ef_tests/src/cases/common.rs | 1 + .../ef_tests/src/cases/epoch_processing.rs | 129 ++++++-------- testing/ef_tests/src/cases/fork.rs | 3 +- .../src/cases/merkle_proof_validity.rs | 8 +- testing/ef_tests/src/cases/operations.rs | 49 +++-- testing/ef_tests/src/cases/transition.rs | 8 + testing/ef_tests/src/handler.rs | 10 +- testing/simulator/src/basic_sim.rs | 4 +- 
testing/simulator/src/fallback_sim.rs | 4 +- testing/simulator/src/local_network.rs | 5 + .../beacon_node_fallback/src/lib.rs | 8 + validator_client/http_api/src/test_utils.rs | 4 +- validator_client/http_api/src/tests.rs | 4 +- .../signing_method/src/web3signer.rs | 6 + 91 files changed, 2359 insertions(+), 668 deletions(-) create mode 100644 beacon_node/beacon_chain/src/fulu_readiness.rs create mode 100644 consensus/state_processing/src/upgrade/fulu.rs create mode 100644 consensus/types/presets/gnosis/fulu.yaml create mode 100644 consensus/types/presets/mainnet/fulu.yaml create mode 100644 consensus/types/presets/minimal/fulu.yaml diff --git a/Makefile b/Makefile index 8faf8a2e54..4d95f50c5c 100644 --- a/Makefile +++ b/Makefile @@ -30,7 +30,7 @@ PROFILE ?= release # List of all hard forks. This list is used to set env variables for several tests so that # they run for different forks. -FORKS=phase0 altair bellatrix capella deneb electra +FORKS=phase0 altair bellatrix capella deneb electra fulu # Extra flags for Cargo CARGO_INSTALL_EXTRA_FLAGS?= diff --git a/beacon_node/beacon_chain/src/attestation_rewards.rs b/beacon_node/beacon_chain/src/attestation_rewards.rs index 87b7384ea6..3b37b09e40 100644 --- a/beacon_node/beacon_chain/src/attestation_rewards.rs +++ b/beacon_node/beacon_chain/src/attestation_rewards.rs @@ -51,13 +51,10 @@ impl BeaconChain { .get_state(&state_root, Some(state_slot))? 
.ok_or(BeaconChainError::MissingBeaconState(state_root))?; - match state { - BeaconState::Base(_) => self.compute_attestation_rewards_base(state, validators), - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => self.compute_attestation_rewards_altair(state, validators), + if state.fork_name_unchecked().altair_enabled() { + self.compute_attestation_rewards_altair(state, validators) + } else { + self.compute_attestation_rewards_base(state, validators) } } diff --git a/beacon_node/beacon_chain/src/beacon_block_streamer.rs b/beacon_node/beacon_chain/src/beacon_block_streamer.rs index b76dba88fd..32ec776868 100644 --- a/beacon_node/beacon_chain/src/beacon_block_streamer.rs +++ b/beacon_node/beacon_chain/src/beacon_block_streamer.rs @@ -15,7 +15,7 @@ use types::{ }; use types::{ ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadElectra, - ExecutionPayloadHeader, + ExecutionPayloadFulu, ExecutionPayloadHeader, }; #[derive(PartialEq)] @@ -99,6 +99,7 @@ fn reconstruct_default_header_block( ForkName::Capella => ExecutionPayloadCapella::default().into(), ForkName::Deneb => ExecutionPayloadDeneb::default().into(), ForkName::Electra => ExecutionPayloadElectra::default().into(), + ForkName::Fulu => ExecutionPayloadFulu::default().into(), ForkName::Base | ForkName::Altair => { return Err(Error::PayloadReconstruction(format!( "Block with fork variant {} has execution payload", @@ -742,13 +743,14 @@ mod tests { } #[tokio::test] - async fn check_all_blocks_from_altair_to_electra() { + async fn check_all_blocks_from_altair_to_fulu() { let slots_per_epoch = MinimalEthSpec::slots_per_epoch() as usize; - let num_epochs = 10; + let num_epochs = 12; let bellatrix_fork_epoch = 2usize; let capella_fork_epoch = 4usize; let deneb_fork_epoch = 6usize; let electra_fork_epoch = 8usize; + let fulu_fork_epoch = 10usize; let num_blocks_produced = num_epochs * slots_per_epoch; 
let mut spec = test_spec::(); @@ -757,6 +759,7 @@ mod tests { spec.capella_fork_epoch = Some(Epoch::new(capella_fork_epoch as u64)); spec.deneb_fork_epoch = Some(Epoch::new(deneb_fork_epoch as u64)); spec.electra_fork_epoch = Some(Epoch::new(electra_fork_epoch as u64)); + spec.fulu_fork_epoch = Some(Epoch::new(fulu_fork_epoch as u64)); let spec = Arc::new(spec); let harness = get_harness(VALIDATOR_COUNT, spec.clone()); diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 7bbb9ff74d..d84cd9615a 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -5317,23 +5317,19 @@ impl BeaconChain { // If required, start the process of loading an execution payload from the EL early. This // allows it to run concurrently with things like attestation packing. - let prepare_payload_handle = match &state { - BeaconState::Base(_) | BeaconState::Altair(_) => None, - BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => { - let prepare_payload_handle = get_execution_payload( - self.clone(), - &state, - parent_root, - proposer_index, - builder_params, - builder_boost_factor, - block_production_version, - )?; - Some(prepare_payload_handle) - } + let prepare_payload_handle = if state.fork_name_unchecked().bellatrix_enabled() { + let prepare_payload_handle = get_execution_payload( + self.clone(), + &state, + parent_root, + proposer_index, + builder_params, + builder_boost_factor, + block_production_version, + )?; + Some(prepare_payload_handle) + } else { + None }; let (mut proposer_slashings, mut attester_slashings, mut voluntary_exits) = @@ -5751,6 +5747,48 @@ impl BeaconChain { execution_payload_value, ) } + BeaconState::Fulu(_) => { + let ( + payload, + kzg_commitments, + maybe_blobs_and_proofs, + maybe_requests, + execution_payload_value, + ) = block_contents + .ok_or(BlockProductionError::MissingExecutionPayload)? 
+ .deconstruct(); + + ( + BeaconBlock::Fulu(BeaconBlockFulu { + slot, + proposer_index, + parent_root, + state_root: Hash256::zero(), + body: BeaconBlockBodyFulu { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings: proposer_slashings.into(), + attester_slashings: attester_slashings_electra.into(), + attestations: attestations_electra.into(), + deposits: deposits.into(), + voluntary_exits: voluntary_exits.into(), + sync_aggregate: sync_aggregate + .ok_or(BlockProductionError::MissingSyncAggregate)?, + execution_payload: payload + .try_into() + .map_err(|_| BlockProductionError::InvalidPayloadFork)?, + bls_to_execution_changes: bls_to_execution_changes.into(), + blob_kzg_commitments: kzg_commitments + .ok_or(BlockProductionError::InvalidPayloadFork)?, + execution_requests: maybe_requests + .ok_or(BlockProductionError::MissingExecutionRequests)?, + }, + }), + maybe_blobs_and_proofs, + execution_payload_value, + ) + } }; let block = SignedBeaconBlock::from_block( diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 502a7918a1..720f98e298 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -374,19 +374,15 @@ pub fn get_execution_payload( let latest_execution_payload_header = state.latest_execution_payload_header()?; let latest_execution_payload_header_block_hash = latest_execution_payload_header.block_hash(); let latest_execution_payload_header_gas_limit = latest_execution_payload_header.gas_limit(); - let withdrawals = match state { - &BeaconState::Capella(_) | &BeaconState::Deneb(_) | &BeaconState::Electra(_) => { - Some(get_expected_withdrawals(state, spec)?.0.into()) - } - &BeaconState::Bellatrix(_) => None, - // These shouldn't happen but they're here to make the pattern irrefutable - &BeaconState::Base(_) | &BeaconState::Altair(_) => None, + let withdrawals = if state.fork_name_unchecked().capella_enabled() { + 
Some(get_expected_withdrawals(state, spec)?.0.into()) + } else { + None }; - let parent_beacon_block_root = match state { - BeaconState::Deneb(_) | BeaconState::Electra(_) => Some(parent_block_root), - BeaconState::Bellatrix(_) | BeaconState::Capella(_) => None, - // These shouldn't happen but they're here to make the pattern irrefutable - BeaconState::Base(_) | BeaconState::Altair(_) => None, + let parent_beacon_block_root = if state.fork_name_unchecked().deneb_enabled() { + Some(parent_block_root) + } else { + None }; // Spawn a task to obtain the execution payload from the EL via a series of async calls. The diff --git a/beacon_node/beacon_chain/src/fulu_readiness.rs b/beacon_node/beacon_chain/src/fulu_readiness.rs new file mode 100644 index 0000000000..71494623f8 --- /dev/null +++ b/beacon_node/beacon_chain/src/fulu_readiness.rs @@ -0,0 +1,114 @@ +//! Provides tools for checking if a node is ready for the Fulu upgrade. + +use crate::{BeaconChain, BeaconChainTypes}; +use execution_layer::http::{ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V5}; +use serde::{Deserialize, Serialize}; +use std::fmt; +use std::time::Duration; +use types::*; + +/// The time before the Fulu fork when we will start issuing warnings about preparation. +use super::bellatrix_readiness::SECONDS_IN_A_WEEK; +pub const FULU_READINESS_PREPARATION_SECONDS: u64 = SECONDS_IN_A_WEEK * 2; +pub const ENGINE_CAPABILITIES_REFRESH_INTERVAL: u64 = 300; + +#[derive(Debug, Serialize, Deserialize)] +#[serde(rename_all = "snake_case")] +#[serde(tag = "type")] +pub enum FuluReadiness { + /// The execution engine is fulu-enabled (as far as we can tell) + Ready, + /// We are connected to an execution engine which doesn't support the V5 engine api methods + V5MethodsNotSupported { error: String }, + /// The transition configuration with the EL failed, there might be a problem with + /// connectivity, authentication or a difference in configuration. 
+ ExchangeCapabilitiesFailed { error: String }, + /// The user has not configured an execution endpoint + NoExecutionEndpoint, +} + +impl fmt::Display for FuluReadiness { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + FuluReadiness::Ready => { + write!(f, "This node appears ready for Fulu.") + } + FuluReadiness::ExchangeCapabilitiesFailed { error } => write!( + f, + "Could not exchange capabilities with the \ + execution endpoint: {}", + error + ), + FuluReadiness::NoExecutionEndpoint => write!( + f, + "The --execution-endpoint flag is not specified, this is a \ + requirement post-merge" + ), + FuluReadiness::V5MethodsNotSupported { error } => write!( + f, + "Execution endpoint does not support Fulu methods: {}", + error + ), + } + } +} + +impl BeaconChain { + /// Returns `true` if fulu epoch is set and Fulu fork has occurred or will + /// occur within `FULU_READINESS_PREPARATION_SECONDS` + pub fn is_time_to_prepare_for_fulu(&self, current_slot: Slot) -> bool { + if let Some(fulu_epoch) = self.spec.fulu_fork_epoch { + let fulu_slot = fulu_epoch.start_slot(T::EthSpec::slots_per_epoch()); + let fulu_readiness_preparation_slots = + FULU_READINESS_PREPARATION_SECONDS / self.spec.seconds_per_slot; + // Return `true` if Fulu has happened or is within the preparation time. + current_slot + fulu_readiness_preparation_slots > fulu_slot + } else { + // The Fulu fork epoch has not been defined yet, no need to prepare. + false + } + } + + /// Attempts to connect to the EL and confirm that it is ready for fulu. 
+ pub async fn check_fulu_readiness(&self) -> FuluReadiness { + if let Some(el) = self.execution_layer.as_ref() { + match el + .get_engine_capabilities(Some(Duration::from_secs( + ENGINE_CAPABILITIES_REFRESH_INTERVAL, + ))) + .await + { + Err(e) => { + // The EL was either unreachable or responded with an error + FuluReadiness::ExchangeCapabilitiesFailed { + error: format!("{:?}", e), + } + } + Ok(capabilities) => { + let mut missing_methods = String::from("Required Methods Unsupported:"); + let mut all_good = true; + if !capabilities.get_payload_v5 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_GET_PAYLOAD_V5); + all_good = false; + } + if !capabilities.new_payload_v5 { + missing_methods.push(' '); + missing_methods.push_str(ENGINE_NEW_PAYLOAD_V5); + all_good = false; + } + + if all_good { + FuluReadiness::Ready + } else { + FuluReadiness::V5MethodsNotSupported { + error: missing_methods, + } + } + } + } + } else { + FuluReadiness::NoExecutionEndpoint + } + } +} diff --git a/beacon_node/beacon_chain/src/lib.rs b/beacon_node/beacon_chain/src/lib.rs index d9728b9fd4..4783945eb1 100644 --- a/beacon_node/beacon_chain/src/lib.rs +++ b/beacon_node/beacon_chain/src/lib.rs @@ -31,6 +31,7 @@ pub mod execution_payload; pub mod fetch_blobs; pub mod fork_choice_signal; pub mod fork_revert; +pub mod fulu_readiness; pub mod graffiti_calculator; mod head_tracker; pub mod historical_blocks; diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 093ee0c44b..d37398e4e0 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -501,6 +501,9 @@ where spec.electra_fork_epoch.map(|epoch| { genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() }); + mock.server.execution_block_generator().osaka_time = spec.fulu_fork_epoch.map(|epoch| { + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); self } @@ -623,6 +626,9 @@ 
pub fn mock_execution_layer_from_parts( let prague_time = spec.electra_fork_epoch.map(|epoch| { HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() }); + let osaka_time = spec.fulu_fork_epoch.map(|epoch| { + HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() + }); let kzg = get_kzg(spec); @@ -632,6 +638,7 @@ pub fn mock_execution_layer_from_parts( shanghai_time, cancun_time, prague_time, + osaka_time, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec.clone(), Some(kzg), @@ -913,15 +920,12 @@ where &self.spec, )); - let block_contents: SignedBlockContentsTuple = match *signed_block { - SignedBeaconBlock::Base(_) - | SignedBeaconBlock::Altair(_) - | SignedBeaconBlock::Bellatrix(_) - | SignedBeaconBlock::Capella(_) => (signed_block, None), - SignedBeaconBlock::Deneb(_) | SignedBeaconBlock::Electra(_) => { + let block_contents: SignedBlockContentsTuple = + if signed_block.fork_name_unchecked().deneb_enabled() { (signed_block, block_response.blob_items) - } - }; + } else { + (signed_block, None) + }; (block_contents, block_response.state) } @@ -977,15 +981,12 @@ where &self.spec, )); - let block_contents: SignedBlockContentsTuple = match *signed_block { - SignedBeaconBlock::Base(_) - | SignedBeaconBlock::Altair(_) - | SignedBeaconBlock::Bellatrix(_) - | SignedBeaconBlock::Capella(_) => (signed_block, None), - SignedBeaconBlock::Deneb(_) | SignedBeaconBlock::Electra(_) => { + let block_contents: SignedBlockContentsTuple = + if signed_block.fork_name_unchecked().deneb_enabled() { (signed_block, block_response.blob_items) - } - }; + } else { + (signed_block, None) + }; (block_contents, pre_state) } @@ -2863,6 +2864,25 @@ pub fn generate_rand_block_and_blobs( message.body.blob_kzg_commitments = bundle.commitments.clone(); bundle } + SignedBeaconBlock::Fulu(SignedBeaconBlockFulu { + ref mut message, .. + }) => { + // Get either zero blobs or a random number of blobs between 1 and Max Blobs. 
+ let payload: &mut FullPayloadFulu = &mut message.body.execution_payload; + let num_blobs = match num_blobs { + NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()), + NumBlobs::Number(n) => n, + NumBlobs::None => 0, + }; + let (bundle, transactions) = + execution_layer::test_utils::generate_blobs::(num_blobs).unwrap(); + payload.execution_payload.transactions = <_>::default(); + for tx in Vec::from(transactions) { + payload.execution_payload.transactions.push(tx).unwrap(); + } + message.body.blob_kzg_commitments = bundle.commitments.clone(); + bundle + } _ => return (block, blob_sidecars), }; diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index f094a173ee..103734b224 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -754,6 +754,11 @@ async fn invalid_signature_attester_slashing() { .push(attester_slashing.as_electra().unwrap().clone()) .expect("should update attester slashing"); } + BeaconBlockBodyRefMut::Fulu(ref mut blk) => { + blk.attester_slashings + .push(attester_slashing.as_electra().unwrap().clone()) + .expect("should update attester slashing"); + } } snapshots[block_index].beacon_block = Arc::new(SignedBeaconBlock::from_block(block, signature)); @@ -809,6 +814,10 @@ async fn invalid_signature_attestation() { .attestations .get_mut(0) .map(|att| att.signature = junk_aggregate_signature()), + BeaconBlockBodyRefMut::Fulu(ref mut blk) => blk + .attestations + .get_mut(0) + .map(|att| att.signature = junk_aggregate_signature()), }; if block.body().attestations_len() > 0 { diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index e1258ccdea..ed97b8d634 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -147,6 +147,7 @@ async fn light_client_bootstrap_test() { 
LightClientBootstrap::Capella(lc_bootstrap) => lc_bootstrap.header.beacon.slot, LightClientBootstrap::Deneb(lc_bootstrap) => lc_bootstrap.header.beacon.slot, LightClientBootstrap::Electra(lc_bootstrap) => lc_bootstrap.header.beacon.slot, + LightClientBootstrap::Fulu(lc_bootstrap) => lc_bootstrap.header.beacon.slot, }; assert_eq!( diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index b4a54d2667..91de4fe270 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -214,6 +214,7 @@ async fn produces_missed_blocks() { ForkName::Capella => 11, ForkName::Deneb => 3, ForkName::Electra => 1, + ForkName::Fulu => 6, }; let harness2 = get_harness(validator_count, vec![validator_index_to_monitor]); diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index e88803e94f..0c3b1578d6 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -4,6 +4,7 @@ use beacon_chain::{ capella_readiness::CapellaReadiness, deneb_readiness::DenebReadiness, electra_readiness::ElectraReadiness, + fulu_readiness::FuluReadiness, BeaconChain, BeaconChainTypes, ExecutionStatus, }; use lighthouse_network::{types::SyncState, NetworkGlobals}; @@ -315,6 +316,7 @@ pub fn spawn_notifier( capella_readiness_logging(current_slot, &beacon_chain, &log).await; deneb_readiness_logging(current_slot, &beacon_chain, &log).await; electra_readiness_logging(current_slot, &beacon_chain, &log).await; + fulu_readiness_logging(current_slot, &beacon_chain, &log).await; } }; @@ -586,6 +588,62 @@ async fn electra_readiness_logging( } } +/// Provides some helpful logging to users to indicate if their node is ready for Fulu. 
+async fn fulu_readiness_logging( + current_slot: Slot, + beacon_chain: &BeaconChain, + log: &Logger, +) { + let fulu_completed = beacon_chain + .canonical_head + .cached_head() + .snapshot + .beacon_state + .fork_name_unchecked() + .fulu_enabled(); + + let has_execution_layer = beacon_chain.execution_layer.is_some(); + + if fulu_completed && has_execution_layer + || !beacon_chain.is_time_to_prepare_for_fulu(current_slot) + { + return; + } + + if fulu_completed && !has_execution_layer { + error!( + log, + "Execution endpoint required"; + "info" => "you need a Fulu enabled execution engine to validate blocks." + ); + return; + } + + match beacon_chain.check_fulu_readiness().await { + FuluReadiness::Ready => { + info!( + log, + "Ready for Fulu"; + "info" => "ensure the execution endpoint is updated to the latest Fulu release" + ) + } + readiness @ FuluReadiness::ExchangeCapabilitiesFailed { error: _ } => { + error!( + log, + "Not ready for Fulu"; + "hint" => "the execution endpoint may be offline", + "info" => %readiness, + ) + } + readiness => warn!( + log, + "Not ready for Fulu"; + "hint" => "try updating the execution endpoint", + "info" => %readiness, + ), + } +} + async fn genesis_execution_payload_logging( beacon_chain: &BeaconChain, log: &Logger, diff --git a/beacon_node/execution_layer/src/engine_api.rs b/beacon_node/execution_layer/src/engine_api.rs index 083aaf2e25..b9d878b1f8 100644 --- a/beacon_node/execution_layer/src/engine_api.rs +++ b/beacon_node/execution_layer/src/engine_api.rs @@ -3,8 +3,8 @@ use crate::http::{ ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, ENGINE_GET_BLOBS_V1, ENGINE_GET_CLIENT_VERSION_V1, ENGINE_GET_PAYLOAD_BODIES_BY_HASH_V1, ENGINE_GET_PAYLOAD_BODIES_BY_RANGE_V1, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, - ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V1, ENGINE_NEW_PAYLOAD_V2, - ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, + ENGINE_GET_PAYLOAD_V3, 
ENGINE_GET_PAYLOAD_V4, ENGINE_GET_PAYLOAD_V5, ENGINE_NEW_PAYLOAD_V1, + ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, ENGINE_NEW_PAYLOAD_V5, }; use eth2::types::{ BlobsBundle, SsePayloadAttributes, SsePayloadAttributesV1, SsePayloadAttributesV2, @@ -24,7 +24,7 @@ pub use types::{ }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, ExecutionRequests, KzgProofs, + ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionRequests, KzgProofs, }; use types::{Graffiti, GRAFFITI_BYTES_LEN}; @@ -35,7 +35,7 @@ mod new_payload_request; pub use new_payload_request::{ NewPayloadRequest, NewPayloadRequestBellatrix, NewPayloadRequestCapella, - NewPayloadRequestDeneb, NewPayloadRequestElectra, + NewPayloadRequestDeneb, NewPayloadRequestElectra, NewPayloadRequestFulu, }; pub const LATEST_TAG: &str = "latest"; @@ -261,7 +261,7 @@ pub struct ProposeBlindedBlockResponse { } #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes(derive(Clone, Debug, PartialEq),), map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), @@ -281,12 +281,14 @@ pub struct GetPayloadResponse { pub execution_payload: ExecutionPayloadDeneb, #[superstruct(only(Electra), partial_getter(rename = "execution_payload_electra"))] pub execution_payload: ExecutionPayloadElectra, + #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] + pub execution_payload: ExecutionPayloadFulu, pub block_value: Uint256, - #[superstruct(only(Deneb, Electra))] + #[superstruct(only(Deneb, Electra, Fulu))] pub blobs_bundle: BlobsBundle, - #[superstruct(only(Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Deneb, Electra, Fulu), partial_getter(copy))] pub should_override_builder: bool, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub requests: ExecutionRequests, } @@ -354,6 +356,12 @@ impl From> 
Some(inner.blobs_bundle), Some(inner.requests), ), + GetPayloadResponse::Fulu(inner) => ( + ExecutionPayload::Fulu(inner.execution_payload), + inner.block_value, + Some(inner.blobs_bundle), + Some(inner.requests), + ), } } } @@ -487,6 +495,34 @@ impl ExecutionPayloadBodyV1 { )) } } + ExecutionPayloadHeader::Fulu(header) => { + if let Some(withdrawals) = self.withdrawals { + Ok(ExecutionPayload::Fulu(ExecutionPayloadFulu { + parent_hash: header.parent_hash, + fee_recipient: header.fee_recipient, + state_root: header.state_root, + receipts_root: header.receipts_root, + logs_bloom: header.logs_bloom, + prev_randao: header.prev_randao, + block_number: header.block_number, + gas_limit: header.gas_limit, + gas_used: header.gas_used, + timestamp: header.timestamp, + extra_data: header.extra_data, + base_fee_per_gas: header.base_fee_per_gas, + block_hash: header.block_hash, + transactions: self.transactions, + withdrawals, + blob_gas_used: header.blob_gas_used, + excess_blob_gas: header.excess_blob_gas, + })) + } else { + Err(format!( + "block {} is post capella but payload body doesn't have withdrawals", + header.block_hash + )) + } + } } } } @@ -497,6 +533,7 @@ pub struct EngineCapabilities { pub new_payload_v2: bool, pub new_payload_v3: bool, pub new_payload_v4: bool, + pub new_payload_v5: bool, pub forkchoice_updated_v1: bool, pub forkchoice_updated_v2: bool, pub forkchoice_updated_v3: bool, @@ -506,6 +543,7 @@ pub struct EngineCapabilities { pub get_payload_v2: bool, pub get_payload_v3: bool, pub get_payload_v4: bool, + pub get_payload_v5: bool, pub get_client_version_v1: bool, pub get_blobs_v1: bool, } @@ -525,6 +563,9 @@ impl EngineCapabilities { if self.new_payload_v4 { response.push(ENGINE_NEW_PAYLOAD_V4); } + if self.new_payload_v5 { + response.push(ENGINE_NEW_PAYLOAD_V5); + } if self.forkchoice_updated_v1 { response.push(ENGINE_FORKCHOICE_UPDATED_V1); } @@ -552,6 +593,9 @@ impl EngineCapabilities { if self.get_payload_v4 { response.push(ENGINE_GET_PAYLOAD_V4); } 
+ if self.get_payload_v5 { + response.push(ENGINE_GET_PAYLOAD_V5); + } if self.get_client_version_v1 { response.push(ENGINE_GET_CLIENT_VERSION_V1); } diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index e2a81c072c..1fd9f81d46 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -35,12 +35,14 @@ pub const ENGINE_NEW_PAYLOAD_V1: &str = "engine_newPayloadV1"; pub const ENGINE_NEW_PAYLOAD_V2: &str = "engine_newPayloadV2"; pub const ENGINE_NEW_PAYLOAD_V3: &str = "engine_newPayloadV3"; pub const ENGINE_NEW_PAYLOAD_V4: &str = "engine_newPayloadV4"; +pub const ENGINE_NEW_PAYLOAD_V5: &str = "engine_newPayloadV5"; pub const ENGINE_NEW_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(8); pub const ENGINE_GET_PAYLOAD_V1: &str = "engine_getPayloadV1"; pub const ENGINE_GET_PAYLOAD_V2: &str = "engine_getPayloadV2"; pub const ENGINE_GET_PAYLOAD_V3: &str = "engine_getPayloadV3"; pub const ENGINE_GET_PAYLOAD_V4: &str = "engine_getPayloadV4"; +pub const ENGINE_GET_PAYLOAD_V5: &str = "engine_getPayloadV5"; pub const ENGINE_GET_PAYLOAD_TIMEOUT: Duration = Duration::from_secs(2); pub const ENGINE_FORKCHOICE_UPDATED_V1: &str = "engine_forkchoiceUpdatedV1"; @@ -72,10 +74,12 @@ pub static LIGHTHOUSE_CAPABILITIES: &[&str] = &[ ENGINE_NEW_PAYLOAD_V2, ENGINE_NEW_PAYLOAD_V3, ENGINE_NEW_PAYLOAD_V4, + ENGINE_NEW_PAYLOAD_V5, ENGINE_GET_PAYLOAD_V1, ENGINE_GET_PAYLOAD_V2, ENGINE_GET_PAYLOAD_V3, ENGINE_GET_PAYLOAD_V4, + ENGINE_GET_PAYLOAD_V5, ENGINE_FORKCHOICE_UPDATED_V1, ENGINE_FORKCHOICE_UPDATED_V2, ENGINE_FORKCHOICE_UPDATED_V3, @@ -825,6 +829,30 @@ impl HttpJsonRpc { Ok(response.into()) } + pub async fn new_payload_v5_fulu( + &self, + new_payload_request_fulu: NewPayloadRequestFulu<'_, E>, + ) -> Result { + let params = json!([ + JsonExecutionPayload::V5(new_payload_request_fulu.execution_payload.clone().into()), + new_payload_request_fulu.versioned_hashes, + 
new_payload_request_fulu.parent_beacon_block_root, + new_payload_request_fulu + .execution_requests + .get_execution_requests_list(), + ]); + + let response: JsonPayloadStatusV1 = self + .rpc_request( + ENGINE_NEW_PAYLOAD_V5, + params, + ENGINE_NEW_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + + Ok(response.into()) + } + pub async fn get_payload_v1( &self, payload_id: PayloadId, @@ -880,9 +908,10 @@ impl HttpJsonRpc { .try_into() .map_err(Error::BadResponse) } - ForkName::Base | ForkName::Altair | ForkName::Deneb | ForkName::Electra => Err( - Error::UnsupportedForkVariant(format!("called get_payload_v2 with {}", fork_name)), - ), + _ => Err(Error::UnsupportedForkVariant(format!( + "called get_payload_v2 with {}", + fork_name + ))), } } @@ -906,11 +935,7 @@ impl HttpJsonRpc { .try_into() .map_err(Error::BadResponse) } - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Electra => Err(Error::UnsupportedForkVariant(format!( + _ => Err(Error::UnsupportedForkVariant(format!( "called get_payload_v3 with {}", fork_name ))), @@ -937,17 +962,40 @@ impl HttpJsonRpc { .try_into() .map_err(Error::BadResponse) } - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb => Err(Error::UnsupportedForkVariant(format!( + _ => Err(Error::UnsupportedForkVariant(format!( "called get_payload_v4 with {}", fork_name ))), } } + pub async fn get_payload_v5( + &self, + fork_name: ForkName, + payload_id: PayloadId, + ) -> Result, Error> { + let params = json!([JsonPayloadIdRequest::from(payload_id)]); + + match fork_name { + ForkName::Fulu => { + let response: JsonGetPayloadResponseV5 = self + .rpc_request( + ENGINE_GET_PAYLOAD_V5, + params, + ENGINE_GET_PAYLOAD_TIMEOUT * self.execution_timeout_multiplier, + ) + .await?; + JsonGetPayloadResponse::V5(response) + .try_into() + .map_err(Error::BadResponse) + } + _ => Err(Error::UnsupportedForkVariant(format!( + "called 
get_payload_v5 with {}", + fork_name + ))), + } + } + pub async fn forkchoice_updated_v1( &self, forkchoice_state: ForkchoiceState, @@ -1071,6 +1119,7 @@ impl HttpJsonRpc { new_payload_v2: capabilities.contains(ENGINE_NEW_PAYLOAD_V2), new_payload_v3: capabilities.contains(ENGINE_NEW_PAYLOAD_V3), new_payload_v4: capabilities.contains(ENGINE_NEW_PAYLOAD_V4), + new_payload_v5: capabilities.contains(ENGINE_NEW_PAYLOAD_V5), forkchoice_updated_v1: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V1), forkchoice_updated_v2: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V2), forkchoice_updated_v3: capabilities.contains(ENGINE_FORKCHOICE_UPDATED_V3), @@ -1082,6 +1131,7 @@ impl HttpJsonRpc { get_payload_v2: capabilities.contains(ENGINE_GET_PAYLOAD_V2), get_payload_v3: capabilities.contains(ENGINE_GET_PAYLOAD_V3), get_payload_v4: capabilities.contains(ENGINE_GET_PAYLOAD_V4), + get_payload_v5: capabilities.contains(ENGINE_GET_PAYLOAD_V5), get_client_version_v1: capabilities.contains(ENGINE_GET_CLIENT_VERSION_V1), get_blobs_v1: capabilities.contains(ENGINE_GET_BLOBS_V1), }) @@ -1212,6 +1262,13 @@ impl HttpJsonRpc { Err(Error::RequiredMethodUnsupported("engine_newPayloadV4")) } } + NewPayloadRequest::Fulu(new_payload_request_fulu) => { + if engine_capabilities.new_payload_v5 { + self.new_payload_v5_fulu(new_payload_request_fulu).await + } else { + Err(Error::RequiredMethodUnsupported("engine_newPayloadV5")) + } + } } } @@ -1247,6 +1304,13 @@ impl HttpJsonRpc { Err(Error::RequiredMethodUnsupported("engine_getPayloadv4")) } } + ForkName::Fulu => { + if engine_capabilities.get_payload_v5 { + self.get_payload_v5(fork_name, payload_id).await + } else { + Err(Error::RequiredMethodUnsupported("engine_getPayloadv5")) + } + } ForkName::Base | ForkName::Altair => Err(Error::UnsupportedForkVariant(format!( "called get_payload with {}", fork_name diff --git a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 
1c6639804e..86acaaaf3b 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -65,7 +65,7 @@ pub struct JsonPayloadIdResponse { } #[superstruct( - variants(V1, V2, V3, V4), + variants(V1, V2, V3, V4, V5), variant_attributes( derive(Debug, PartialEq, Default, Serialize, Deserialize,), serde(bound = "E: EthSpec", rename_all = "camelCase"), @@ -100,12 +100,12 @@ pub struct JsonExecutionPayload { pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, - #[superstruct(only(V2, V3, V4))] + #[superstruct(only(V2, V3, V4, V5))] pub withdrawals: VariableList, - #[superstruct(only(V3, V4))] + #[superstruct(only(V3, V4, V5))] #[serde(with = "serde_utils::u64_hex_be")] pub blob_gas_used: u64, - #[superstruct(only(V3, V4))] + #[superstruct(only(V3, V4, V5))] #[serde(with = "serde_utils::u64_hex_be")] pub excess_blob_gas: u64, } @@ -214,6 +214,35 @@ impl From> for JsonExecutionPayloadV4 } } +impl From> for JsonExecutionPayloadV5 { + fn from(payload: ExecutionPayloadFulu) -> Self { + JsonExecutionPayloadV5 { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas, + } + } +} + impl From> for JsonExecutionPayload { fn from(execution_payload: ExecutionPayload) -> Self { match execution_payload { @@ 
-221,6 +250,7 @@ impl From> for JsonExecutionPayload { ExecutionPayload::Capella(payload) => JsonExecutionPayload::V2(payload.into()), ExecutionPayload::Deneb(payload) => JsonExecutionPayload::V3(payload.into()), ExecutionPayload::Electra(payload) => JsonExecutionPayload::V4(payload.into()), + ExecutionPayload::Fulu(payload) => JsonExecutionPayload::V5(payload.into()), } } } @@ -330,6 +360,35 @@ impl From> for ExecutionPayloadElectra } } +impl From> for ExecutionPayloadFulu { + fn from(payload: JsonExecutionPayloadV5) -> Self { + ExecutionPayloadFulu { + parent_hash: payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom, + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data, + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions: payload.transactions, + withdrawals: payload + .withdrawals + .into_iter() + .map(Into::into) + .collect::>() + .into(), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas, + } + } +} + impl From> for ExecutionPayload { fn from(json_execution_payload: JsonExecutionPayload) -> Self { match json_execution_payload { @@ -337,6 +396,7 @@ impl From> for ExecutionPayload { JsonExecutionPayload::V2(payload) => ExecutionPayload::Capella(payload.into()), JsonExecutionPayload::V3(payload) => ExecutionPayload::Deneb(payload.into()), JsonExecutionPayload::V4(payload) => ExecutionPayload::Electra(payload.into()), + JsonExecutionPayload::V5(payload) => ExecutionPayload::Fulu(payload.into()), } } } @@ -389,7 +449,7 @@ impl TryFrom for ExecutionRequests { } #[superstruct( - variants(V1, V2, V3, V4), + variants(V1, V2, V3, V4, V5), variant_attributes( derive(Debug, PartialEq, Serialize, Deserialize), serde(bound = "E: EthSpec", 
rename_all = "camelCase") @@ -408,13 +468,15 @@ pub struct JsonGetPayloadResponse { pub execution_payload: JsonExecutionPayloadV3, #[superstruct(only(V4), partial_getter(rename = "execution_payload_v4"))] pub execution_payload: JsonExecutionPayloadV4, + #[superstruct(only(V5), partial_getter(rename = "execution_payload_v5"))] + pub execution_payload: JsonExecutionPayloadV5, #[serde(with = "serde_utils::u256_hex_be")] pub block_value: Uint256, - #[superstruct(only(V3, V4))] + #[superstruct(only(V3, V4, V5))] pub blobs_bundle: JsonBlobsBundleV1, - #[superstruct(only(V3, V4))] + #[superstruct(only(V3, V4, V5))] pub should_override_builder: bool, - #[superstruct(only(V4))] + #[superstruct(only(V4, V5))] pub execution_requests: JsonExecutionRequests, } @@ -451,6 +513,15 @@ impl TryFrom> for GetPayloadResponse { requests: response.execution_requests.try_into()?, })) } + JsonGetPayloadResponse::V5(response) => { + Ok(GetPayloadResponse::Fulu(GetPayloadResponseFulu { + execution_payload: response.execution_payload.into(), + block_value: response.block_value, + blobs_bundle: response.blobs_bundle.into(), + should_override_builder: response.should_override_builder, + requests: response.execution_requests.try_into()?, + })) + } } } } diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index 60bc848974..a86b2fd9bb 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -9,11 +9,11 @@ use types::{ }; use types::{ ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, ExecutionRequests, + ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionRequests, }; #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes(derive(Clone, Debug, PartialEq),), 
map_into(ExecutionPayload), map_ref_into(ExecutionPayloadRef), @@ -39,11 +39,13 @@ pub struct NewPayloadRequest<'block, E: EthSpec> { pub execution_payload: &'block ExecutionPayloadDeneb, #[superstruct(only(Electra), partial_getter(rename = "execution_payload_electra"))] pub execution_payload: &'block ExecutionPayloadElectra, - #[superstruct(only(Deneb, Electra))] + #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] + pub execution_payload: &'block ExecutionPayloadFulu, + #[superstruct(only(Deneb, Electra, Fulu))] pub versioned_hashes: Vec, - #[superstruct(only(Deneb, Electra))] + #[superstruct(only(Deneb, Electra, Fulu))] pub parent_beacon_block_root: Hash256, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub execution_requests: &'block ExecutionRequests, } @@ -54,6 +56,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { Self::Capella(payload) => payload.execution_payload.parent_hash, Self::Deneb(payload) => payload.execution_payload.parent_hash, Self::Electra(payload) => payload.execution_payload.parent_hash, + Self::Fulu(payload) => payload.execution_payload.parent_hash, } } @@ -63,6 +66,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { Self::Capella(payload) => payload.execution_payload.block_hash, Self::Deneb(payload) => payload.execution_payload.block_hash, Self::Electra(payload) => payload.execution_payload.block_hash, + Self::Fulu(payload) => payload.execution_payload.block_hash, } } @@ -72,6 +76,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { Self::Capella(payload) => payload.execution_payload.block_number, Self::Deneb(payload) => payload.execution_payload.block_number, Self::Electra(payload) => payload.execution_payload.block_number, + Self::Fulu(payload) => payload.execution_payload.block_number, } } @@ -81,6 +86,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { Self::Capella(request) => ExecutionPayloadRef::Capella(request.execution_payload), 
Self::Deneb(request) => ExecutionPayloadRef::Deneb(request.execution_payload), Self::Electra(request) => ExecutionPayloadRef::Electra(request.execution_payload), + Self::Fulu(request) => ExecutionPayloadRef::Fulu(request.execution_payload), } } @@ -92,6 +98,7 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { Self::Capella(request) => ExecutionPayload::Capella(request.execution_payload.clone()), Self::Deneb(request) => ExecutionPayload::Deneb(request.execution_payload.clone()), Self::Electra(request) => ExecutionPayload::Electra(request.execution_payload.clone()), + Self::Fulu(request) => ExecutionPayload::Fulu(request.execution_payload.clone()), } } @@ -190,6 +197,17 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<'a, E> parent_beacon_block_root: block_ref.parent_root, execution_requests: &block_ref.body.execution_requests, })), + BeaconBlockRef::Fulu(block_ref) => Ok(Self::Fulu(NewPayloadRequestFulu { + execution_payload: &block_ref.body.execution_payload.execution_payload, + versioned_hashes: block_ref + .body + .blob_kzg_commitments + .iter() + .map(kzg_commitment_to_versioned_hash) + .collect(), + parent_beacon_block_root: block_ref.parent_root, + execution_requests: &block_ref.body.execution_requests, + })), } } } @@ -209,6 +227,7 @@ impl<'a, E: EthSpec> TryFrom> for NewPayloadRequest<' })), ExecutionPayloadRef::Deneb(_) => Err(Self::Error::IncorrectStateVariant), ExecutionPayloadRef::Electra(_) => Err(Self::Error::IncorrectStateVariant), + ExecutionPayloadRef::Fulu(_) => Err(Self::Error::IncorrectStateVariant), } } } diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index f3b12b21d1..118d7adfca 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -54,8 +54,8 @@ use types::{ }; use types::{ BeaconStateError, BlindedPayload, ChainSpec, Epoch, ExecPayload, ExecutionPayloadBellatrix, - ExecutionPayloadCapella, ExecutionPayloadElectra, FullPayload, 
ProposerPreparationData, - PublicKeyBytes, Signature, Slot, + ExecutionPayloadCapella, ExecutionPayloadElectra, ExecutionPayloadFulu, FullPayload, + ProposerPreparationData, PublicKeyBytes, Signature, Slot, }; mod block_hash; @@ -124,6 +124,14 @@ impl TryFrom> for ProvenancedPayload BlockProposalContents::PayloadAndBlobs { + payload: ExecutionPayloadHeader::Fulu(builder_bid.header).into(), + block_value: builder_bid.value, + kzg_commitments: builder_bid.blob_kzg_commitments, + blobs_and_proofs: None, + // TODO(fulu): update this with builder api returning the requests + requests: None, + }, }; Ok(ProvenancedPayload::Builder( BlockProposalContentsType::Blinded(block_proposal_contents), @@ -1821,6 +1829,7 @@ impl ExecutionLayer { ForkName::Capella => ExecutionPayloadCapella::default().into(), ForkName::Deneb => ExecutionPayloadDeneb::default().into(), ForkName::Electra => ExecutionPayloadElectra::default().into(), + ForkName::Fulu => ExecutionPayloadFulu::default().into(), ForkName::Base | ForkName::Altair => { return Err(Error::InvalidForkForPayload); } diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 4fab7150ce..2a39796707 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -19,7 +19,7 @@ use tree_hash::TreeHash; use tree_hash_derive::TreeHash; use types::{ Blob, ChainSpec, EthSpec, ExecutionBlockHash, ExecutionPayload, ExecutionPayloadBellatrix, - ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, + ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadHeader, FixedBytesExtended, ForkName, Hash256, Transaction, Transactions, Uint256, }; @@ -147,6 +147,7 @@ pub struct ExecutionBlockGenerator { pub shanghai_time: Option, // capella pub cancun_time: Option, // 
deneb pub prague_time: Option, // electra + pub osaka_time: Option, // fulu /* * deneb stuff */ @@ -162,6 +163,7 @@ fn make_rng() -> Arc> { } impl ExecutionBlockGenerator { + #[allow(clippy::too_many_arguments)] pub fn new( terminal_total_difficulty: Uint256, terminal_block_number: u64, @@ -169,6 +171,7 @@ impl ExecutionBlockGenerator { shanghai_time: Option, cancun_time: Option, prague_time: Option, + osaka_time: Option, kzg: Option>, ) -> Self { let mut gen = Self { @@ -185,6 +188,7 @@ impl ExecutionBlockGenerator { shanghai_time, cancun_time, prague_time, + osaka_time, blobs_bundles: <_>::default(), kzg, rng: make_rng(), @@ -233,13 +237,16 @@ impl ExecutionBlockGenerator { } pub fn get_fork_at_timestamp(&self, timestamp: u64) -> ForkName { - match self.prague_time { - Some(fork_time) if timestamp >= fork_time => ForkName::Electra, - _ => match self.cancun_time { - Some(fork_time) if timestamp >= fork_time => ForkName::Deneb, - _ => match self.shanghai_time { - Some(fork_time) if timestamp >= fork_time => ForkName::Capella, - _ => ForkName::Bellatrix, + match self.osaka_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Fulu, + _ => match self.prague_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Electra, + _ => match self.cancun_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Deneb, + _ => match self.shanghai_time { + Some(fork_time) if timestamp >= fork_time => ForkName::Capella, + _ => ForkName::Bellatrix, + }, }, }, } @@ -664,6 +671,25 @@ impl ExecutionBlockGenerator { blob_gas_used: 0, excess_blob_gas: 0, }), + ForkName::Fulu => ExecutionPayload::Fulu(ExecutionPayloadFulu { + parent_hash: head_block_hash, + fee_recipient: pa.suggested_fee_recipient, + receipts_root: Hash256::repeat_byte(42), + state_root: Hash256::repeat_byte(43), + logs_bloom: vec![0; 256].into(), + prev_randao: pa.prev_randao, + block_number: parent.block_number() + 1, + gas_limit: DEFAULT_GAS_LIMIT, + gas_used: GAS_USED, + timestamp: 
pa.timestamp, + extra_data: "block gen was here".as_bytes().to_vec().into(), + base_fee_per_gas: Uint256::from(1u64), + block_hash: ExecutionBlockHash::zero(), + transactions: vec![].into(), + withdrawals: pa.withdrawals.clone().into(), + blob_gas_used: 0, + excess_blob_gas: 0, + }), _ => unreachable!(), }, }; @@ -811,6 +837,12 @@ pub fn generate_genesis_header( *header.transactions_root_mut() = empty_transactions_root; Some(header) } + ForkName::Fulu => { + let mut header = ExecutionPayloadHeader::Fulu(<_>::default()); + *header.block_hash_mut() = genesis_block_hash.unwrap_or_default(); + *header.transactions_root_mut() = empty_transactions_root; + Some(header) + } } } @@ -883,6 +915,7 @@ mod test { None, None, None, + None, ); for i in 0..=TERMINAL_BLOCK { diff --git a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs index 9365024ffb..0babb9d1a3 100644 --- a/beacon_node/execution_layer/src/test_utils/handle_rpc.rs +++ b/beacon_node/execution_layer/src/test_utils/handle_rpc.rs @@ -99,7 +99,8 @@ pub async fn handle_rpc( ENGINE_NEW_PAYLOAD_V1 | ENGINE_NEW_PAYLOAD_V2 | ENGINE_NEW_PAYLOAD_V3 - | ENGINE_NEW_PAYLOAD_V4 => { + | ENGINE_NEW_PAYLOAD_V4 + | ENGINE_NEW_PAYLOAD_V5 => { let request = match method { ENGINE_NEW_PAYLOAD_V1 => JsonExecutionPayload::V1( get_param::>(params, 0) @@ -121,6 +122,9 @@ pub async fn handle_rpc( ENGINE_NEW_PAYLOAD_V4 => get_param::>(params, 0) .map(|jep| JsonExecutionPayload::V4(jep)) .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, + ENGINE_NEW_PAYLOAD_V5 => get_param::>(params, 0) + .map(|jep| JsonExecutionPayload::V5(jep)) + .map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?, _ => unreachable!(), }; @@ -222,6 +226,54 @@ pub async fn handle_rpc( )); } } + ForkName::Fulu => { + if method == ENGINE_NEW_PAYLOAD_V1 + || method == ENGINE_NEW_PAYLOAD_V2 + || method == ENGINE_NEW_PAYLOAD_V3 + || method == ENGINE_NEW_PAYLOAD_V4 + { + return Err(( + format!("{} called after Fulu fork!", method), 
+ GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V1(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV1` after Fulu fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V2(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV2` after Fulu fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V3(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV3` after Fulu fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + if matches!(request, JsonExecutionPayload::V4(_)) { + return Err(( + format!( + "{} called with `ExecutionPayloadV4` after Fulu fork!", + method + ), + GENERIC_ERROR_CODE, + )); + } + } _ => unreachable!(), }; @@ -260,7 +312,8 @@ pub async fn handle_rpc( ENGINE_GET_PAYLOAD_V1 | ENGINE_GET_PAYLOAD_V2 | ENGINE_GET_PAYLOAD_V3 - | ENGINE_GET_PAYLOAD_V4 => { + | ENGINE_GET_PAYLOAD_V4 + | ENGINE_GET_PAYLOAD_V5 => { let request: JsonPayloadIdRequest = get_param(params, 0).map_err(|s| (s, BAD_PARAMS_ERROR_CODE))?; let id = request.into(); @@ -320,6 +373,23 @@ pub async fn handle_rpc( )); } + // validate method called correctly according to fulu fork time + if ctx + .execution_block_generator + .read() + .get_fork_at_timestamp(response.timestamp()) + == ForkName::Fulu + && (method == ENGINE_GET_PAYLOAD_V1 + || method == ENGINE_GET_PAYLOAD_V2 + || method == ENGINE_GET_PAYLOAD_V3 + || method == ENGINE_GET_PAYLOAD_V4) + { + return Err(( + format!("{} called after Fulu fork!", method), + FORK_REQUEST_MISMATCH_ERROR_CODE, + )); + } + match method { ENGINE_GET_PAYLOAD_V1 => { Ok(serde_json::to_value(JsonExecutionPayload::from(response)).unwrap()) @@ -380,6 +450,24 @@ pub async fn handle_rpc( } _ => unreachable!(), }), + ENGINE_GET_PAYLOAD_V5 => Ok(match JsonExecutionPayload::from(response) { + JsonExecutionPayload::V5(execution_payload) => { + serde_json::to_value(JsonGetPayloadResponseV5 { + 
execution_payload, + block_value: Uint256::from(DEFAULT_MOCK_EL_PAYLOAD_VALUE_WEI), + blobs_bundle: maybe_blobs + .ok_or(( + "No blobs returned despite V5 Payload".to_string(), + GENERIC_ERROR_CODE, + ))? + .into(), + should_override_builder: false, + execution_requests: Default::default(), + }) + .unwrap() + } + _ => unreachable!(), + }), _ => unreachable!(), } } @@ -411,7 +499,10 @@ pub async fn handle_rpc( .map(|opt| opt.map(JsonPayloadAttributes::V1)) .transpose() } - ForkName::Capella | ForkName::Deneb | ForkName::Electra => { + ForkName::Capella + | ForkName::Deneb + | ForkName::Electra + | ForkName::Fulu => { get_param::>(params, 1) .map(|opt| opt.map(JsonPayloadAttributes::V2)) .transpose() @@ -475,7 +566,7 @@ pub async fn handle_rpc( )); } } - ForkName::Deneb | ForkName::Electra => { + ForkName::Deneb | ForkName::Electra | ForkName::Fulu => { if method == ENGINE_FORKCHOICE_UPDATED_V1 { return Err(( format!("{} called after Deneb fork!", method), diff --git a/beacon_node/execution_layer/src/test_utils/mock_builder.rs b/beacon_node/execution_layer/src/test_utils/mock_builder.rs index 879b54eb07..65181dcf4f 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_builder.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_builder.rs @@ -16,7 +16,7 @@ use tempfile::NamedTempFile; use tree_hash::TreeHash; use types::builder_bid::{ BuilderBid, BuilderBidBellatrix, BuilderBidCapella, BuilderBidDeneb, BuilderBidElectra, - SignedBuilderBid, + BuilderBidFulu, SignedBuilderBid, }; use types::{ Address, BeaconState, ChainSpec, EthSpec, ExecPayload, ExecutionPayload, @@ -95,6 +95,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.fee_recipient = fee_recipient; } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.fee_recipient = fee_recipient; + } } } @@ -112,6 +115,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.gas_limit = gas_limit; } + 
ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.gas_limit = gas_limit; + } } } @@ -133,6 +139,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.parent_hash = ExecutionBlockHash::from_root(parent_hash); } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.parent_hash = ExecutionBlockHash::from_root(parent_hash); + } } } @@ -150,6 +159,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.prev_randao = prev_randao; } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.prev_randao = prev_randao; + } } } @@ -167,6 +179,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.block_number = block_number; } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.block_number = block_number; + } } } @@ -184,6 +199,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.timestamp = timestamp; } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.timestamp = timestamp; + } } } @@ -201,6 +219,9 @@ impl BidStuff for BuilderBid { ExecutionPayloadHeaderRefMut::Electra(header) => { header.withdrawals_root = withdrawals_root; } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.withdrawals_root = withdrawals_root; + } } } @@ -230,6 +251,10 @@ impl BidStuff for BuilderBid { header.extra_data = extra_data; header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); } + ExecutionPayloadHeaderRefMut::Fulu(header) => { + header.extra_data = extra_data; + header.block_hash = ExecutionBlockHash::from_root(header.tree_hash_root()); + } } } } @@ -378,6 +403,9 @@ pub fn serve( SignedBlindedBeaconBlock::Electra(block) => { block.message.body.execution_payload.tree_hash_root() } + SignedBlindedBeaconBlock::Fulu(block) => { + block.message.body.execution_payload.tree_hash_root() + } }; let payload = builder .el @@ -536,7 +564,7 @@ pub fn serve( expected_withdrawals, None, ), - 
ForkName::Deneb | ForkName::Electra => PayloadAttributes::new( + ForkName::Deneb | ForkName::Electra | ForkName::Fulu => PayloadAttributes::new( timestamp, *prev_randao, fee_recipient, @@ -592,6 +620,17 @@ pub fn serve( ) = payload_response.into(); match fork { + ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu { + header: payload + .as_fulu() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) + .unwrap_or_default(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { header: payload .as_electra() @@ -644,6 +683,17 @@ pub fn serve( Option>, ) = payload_response.into(); match fork { + ForkName::Fulu => BuilderBid::Fulu(BuilderBidFulu { + header: payload + .as_fulu() + .map_err(|_| reject("incorrect payload variant"))? + .into(), + blob_kzg_commitments: maybe_blobs_bundle + .map(|b| b.commitments) + .unwrap_or_default(), + value: Uint256::from(DEFAULT_BUILDER_PAYLOAD_VALUE_WEI), + pubkey: builder.builder_sk.public_key().compress(), + }), ForkName::Electra => BuilderBid::Electra(BuilderBidElectra { header: payload .as_electra() diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index dc90d91c0f..9df8d9cc5c 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -28,6 +28,7 @@ impl MockExecutionLayer { None, None, None, + None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), spec, None, @@ -41,6 +42,7 @@ impl MockExecutionLayer { shanghai_time: Option, cancun_time: Option, prague_time: Option, + osaka_time: Option, jwt_key: Option, spec: ChainSpec, kzg: Option>, @@ -57,6 +59,7 @@ impl MockExecutionLayer { shanghai_time, cancun_time, prague_time, + osaka_time, 
kzg, ); diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index faf6d4ef0b..5934c069a2 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -44,6 +44,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { new_payload_v2: true, new_payload_v3: true, new_payload_v4: true, + new_payload_v5: true, forkchoice_updated_v1: true, forkchoice_updated_v2: true, forkchoice_updated_v3: true, @@ -53,6 +54,7 @@ pub const DEFAULT_ENGINE_CAPABILITIES: EngineCapabilities = EngineCapabilities { get_payload_v2: true, get_payload_v3: true, get_payload_v4: true, + get_payload_v5: true, get_client_version_v1: true, get_blobs_v1: true, }; @@ -82,6 +84,7 @@ pub struct MockExecutionConfig { pub shanghai_time: Option, pub cancun_time: Option, pub prague_time: Option, + pub osaka_time: Option, } impl Default for MockExecutionConfig { @@ -95,6 +98,7 @@ impl Default for MockExecutionConfig { shanghai_time: None, cancun_time: None, prague_time: None, + osaka_time: None, } } } @@ -117,6 +121,7 @@ impl MockServer { None, // FIXME(capella): should this be the default? None, // FIXME(deneb): should this be the default? None, // FIXME(electra): should this be the default? + None, // FIXME(fulu): should this be the default? 
None, ) } @@ -135,6 +140,7 @@ impl MockServer { shanghai_time, cancun_time, prague_time, + osaka_time, } = config; let last_echo_request = Arc::new(RwLock::new(None)); let preloaded_responses = Arc::new(Mutex::new(vec![])); @@ -145,6 +151,7 @@ impl MockServer { shanghai_time, cancun_time, prague_time, + osaka_time, kzg, ); @@ -208,6 +215,7 @@ impl MockServer { shanghai_time: Option, cancun_time: Option, prague_time: Option, + osaka_time: Option, kzg: Option>, ) -> Self { Self::new_with_config( @@ -221,6 +229,7 @@ impl MockServer { shanghai_time, cancun_time, prague_time, + osaka_time, }, kzg, ) diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index e1ecf2d4fc..8e0a51a32a 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -75,7 +75,7 @@ pub async fn gossip_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) + .post_beacon_blocks_v2_ssz(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -124,7 +124,7 @@ pub async fn gossip_partial_pass() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) + .post_beacon_blocks_v2_ssz(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -165,7 +165,7 @@ pub async fn gossip_full_pass() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block.clone(), blobs), validation_level, ) @@ -261,7 +261,7 @@ pub async fn consensus_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) + 
.post_beacon_blocks_v2_ssz(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -307,7 +307,7 @@ pub async fn consensus_gossip() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) + .post_beacon_blocks_v2_ssz(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -423,7 +423,7 @@ pub async fn consensus_full_pass() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block.clone(), blobs), validation_level, ) @@ -475,7 +475,7 @@ pub async fn equivocation_invalid() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) + .post_beacon_blocks_v2_ssz(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -533,7 +533,7 @@ pub async fn equivocation_consensus_early_equivocation() { /* submit `block_a` as valid */ assert!(tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block_a.clone(), blobs_a), validation_level ) @@ -547,7 +547,7 @@ pub async fn equivocation_consensus_early_equivocation() { /* submit `block_b` which should induce equivocation */ let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block_b.clone(), blobs_b), validation_level, ) @@ -596,7 +596,7 @@ pub async fn equivocation_gossip() { let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&PublishBlockRequest::new(block, blobs), validation_level) + .post_beacon_blocks_v2_ssz(&PublishBlockRequest::new(block, blobs), validation_level) .await; assert!(response.is_err()); @@ -721,7 +721,7 @@ pub async fn equivocation_full_pass() { let response: Result<(), eth2::Error> = 
tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block.clone(), blobs), validation_level, ) @@ -1413,7 +1413,7 @@ pub async fn block_seen_on_gossip_without_blobs() { // Post the block *and* blobs to the HTTP API. let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block.clone(), Some(blobs)), validation_level, ) @@ -1498,7 +1498,7 @@ pub async fn block_seen_on_gossip_with_some_blobs() { // Post the block *and* all blobs to the HTTP API. let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block.clone(), Some(blobs)), validation_level, ) @@ -1571,7 +1571,7 @@ pub async fn blobs_seen_on_gossip_without_block() { // Post the block *and* all blobs to the HTTP API. let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block.clone(), Some((kzg_proofs, blobs))), validation_level, ) @@ -1645,7 +1645,7 @@ pub async fn blobs_seen_on_gossip_without_block_and_no_http_blobs() { // Post just the block to the HTTP API (blob lists are empty). let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new( block.clone(), Some((Default::default(), Default::default())), @@ -1717,7 +1717,7 @@ pub async fn slashable_blobs_seen_on_gossip_cause_failure() { // Post block A *and* all its blobs to the HTTP API. 
let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2( + .post_beacon_blocks_v2_ssz( &PublishBlockRequest::new(block_a.clone(), Some((kzg_proofs_a, blobs_a))), validation_level, ) @@ -1778,7 +1778,7 @@ pub async fn duplicate_block_status_code() { let block_request = PublishBlockRequest::new(block.clone(), Some((kzg_proofs, blobs))); let response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block_request, validation_level) + .post_beacon_blocks_v2_ssz(&block_request, validation_level) .await; // This should result in the block being fully imported. @@ -1791,7 +1791,7 @@ pub async fn duplicate_block_status_code() { // Post again. let duplicate_response: Result<(), eth2::Error> = tester .client - .post_beacon_blocks_v2(&block_request, validation_level) + .post_beacon_blocks_v2_ssz(&block_request, validation_level) .await; let err = duplicate_response.unwrap_err(); assert_eq!(err.status().unwrap(), duplicate_block_status_code); diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 1efe44a613..85d3b4e9ba 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2232,9 +2232,9 @@ impl ApiTester { pub async fn test_get_config_spec(self) -> Self { let result = self .client - .get_config_spec::() + .get_config_spec::() .await - .map(|res| ConfigAndPreset::Electra(res.data)) + .map(|res| ConfigAndPreset::Fulu(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&self.chain.spec, None); diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index 5d86936d41..c3d20bbfb1 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -20,7 +20,7 @@ use types::{ LightClientBootstrap, LightClientFinalityUpdate, LightClientOptimisticUpdate, LightClientUpdate, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockAltair, 
SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockElectra, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, }; use unsigned_varint::codec::Uvi; @@ -458,6 +458,9 @@ fn context_bytes( return match **ref_box_block { // NOTE: If you are adding another fork type here, be sure to modify the // `fork_context.to_context_bytes()` function to support it as well! + SignedBeaconBlock::Fulu { .. } => { + fork_context.to_context_bytes(ForkName::Fulu) + } SignedBeaconBlock::Electra { .. } => { fork_context.to_context_bytes(ForkName::Electra) } @@ -682,18 +685,18 @@ fn handle_rpc_response( SignedBeaconBlock::Base(SignedBeaconBlockBase::from_ssz_bytes(decoded_buffer)?), )))), SupportedProtocol::BlobsByRangeV1 => match fork_name { - Some(ForkName::Deneb) | Some(ForkName::Electra) => { - Ok(Some(RpcSuccessResponse::BlobsByRange(Arc::new( - BlobSidecar::from_ssz_bytes(decoded_buffer)?, - )))) + Some(fork_name) => { + if fork_name.deneb_enabled() { + Ok(Some(RpcSuccessResponse::BlobsByRange(Arc::new( + BlobSidecar::from_ssz_bytes(decoded_buffer)?, + )))) + } else { + Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + "Invalid fork name for blobs by range".to_string(), + )) + } } - Some(ForkName::Base) - | Some(ForkName::Altair) - | Some(ForkName::Bellatrix) - | Some(ForkName::Capella) => Err(RPCError::ErrorResponse( - RpcErrorResponse::InvalidRequest, - "Invalid fork name for blobs by range".to_string(), - )), None => Err(RPCError::ErrorResponse( RpcErrorResponse::InvalidRequest, format!( @@ -703,18 +706,18 @@ fn handle_rpc_response( )), }, SupportedProtocol::BlobsByRootV1 => match fork_name { - Some(ForkName::Deneb) | Some(ForkName::Electra) => { - Ok(Some(RpcSuccessResponse::BlobsByRoot(Arc::new( - BlobSidecar::from_ssz_bytes(decoded_buffer)?, - )))) + Some(fork_name) => { + if fork_name.deneb_enabled() { + Ok(Some(RpcSuccessResponse::BlobsByRoot(Arc::new( + 
BlobSidecar::from_ssz_bytes(decoded_buffer)?, + )))) + } else { + Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + "Invalid fork name for blobs by root".to_string(), + )) + } } - Some(ForkName::Base) - | Some(ForkName::Altair) - | Some(ForkName::Bellatrix) - | Some(ForkName::Capella) => Err(RPCError::ErrorResponse( - RpcErrorResponse::InvalidRequest, - "Invalid fork name for blobs by root".to_string(), - )), None => Err(RPCError::ErrorResponse( RpcErrorResponse::InvalidRequest, format!( @@ -864,6 +867,9 @@ fn handle_rpc_response( decoded_buffer, )?), )))), + Some(ForkName::Fulu) => Ok(Some(RpcSuccessResponse::BlocksByRange(Arc::new( + SignedBeaconBlock::Fulu(SignedBeaconBlockFulu::from_ssz_bytes(decoded_buffer)?), + )))), None => Err(RPCError::ErrorResponse( RpcErrorResponse::InvalidRequest, format!( @@ -897,6 +903,9 @@ fn handle_rpc_response( decoded_buffer, )?), )))), + Some(ForkName::Fulu) => Ok(Some(RpcSuccessResponse::BlocksByRoot(Arc::new( + SignedBeaconBlock::Fulu(SignedBeaconBlockFulu::from_ssz_bytes(decoded_buffer)?), + )))), None => Err(RPCError::ErrorResponse( RpcErrorResponse::InvalidRequest, format!( @@ -948,12 +957,14 @@ mod tests { let capella_fork_epoch = Epoch::new(3); let deneb_fork_epoch = Epoch::new(4); let electra_fork_epoch = Epoch::new(5); + let fulu_fork_epoch = Epoch::new(6); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); chain_spec.electra_fork_epoch = Some(electra_fork_epoch); + chain_spec.fulu_fork_epoch = Some(fulu_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), @@ -962,6 +973,7 @@ mod tests { ForkName::Capella => capella_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Deneb => deneb_fork_epoch.start_slot(Spec::slots_per_epoch()), ForkName::Electra => 
electra_fork_epoch.start_slot(Spec::slots_per_epoch()), + ForkName::Fulu => fulu_fork_epoch.start_slot(Spec::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } @@ -1396,6 +1408,16 @@ mod tests { Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), ); + assert_eq!( + encode_then_decode_response( + SupportedProtocol::BlobsByRangeV1, + RpcResponse::Success(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar())), + ForkName::Fulu, + &chain_spec + ), + Ok(Some(RpcSuccessResponse::BlobsByRange(empty_blob_sidecar()))), + ); + assert_eq!( encode_then_decode_response( SupportedProtocol::BlobsByRootV1, @@ -1416,6 +1438,16 @@ mod tests { Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), ); + assert_eq!( + encode_then_decode_response( + SupportedProtocol::BlobsByRootV1, + RpcResponse::Success(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar())), + ForkName::Fulu, + &chain_spec + ), + Ok(Some(RpcSuccessResponse::BlobsByRoot(empty_blob_sidecar()))), + ); + assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRangeV1, @@ -1444,6 +1476,20 @@ mod tests { ))), ); + assert_eq!( + encode_then_decode_response( + SupportedProtocol::DataColumnsByRangeV1, + RpcResponse::Success(RpcSuccessResponse::DataColumnsByRange( + empty_data_column_sidecar() + )), + ForkName::Fulu, + &chain_spec + ), + Ok(Some(RpcSuccessResponse::DataColumnsByRange( + empty_data_column_sidecar() + ))), + ); + assert_eq!( encode_then_decode_response( SupportedProtocol::DataColumnsByRootV1, @@ -1471,6 +1517,20 @@ mod tests { empty_data_column_sidecar() ))), ); + + assert_eq!( + encode_then_decode_response( + SupportedProtocol::DataColumnsByRootV1, + RpcResponse::Success(RpcSuccessResponse::DataColumnsByRoot( + empty_data_column_sidecar() + )), + ForkName::Fulu, + &chain_spec + ), + Ok(Some(RpcSuccessResponse::DataColumnsByRoot( + empty_data_column_sidecar() + ))), + ); } // Test RPCResponse encoding/decoding for V1 messages diff 
--git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 57c2795b04..87bde58292 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -18,9 +18,9 @@ use tokio_util::{ }; use types::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockCapella, BeaconBlockElectra, - BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, EthSpecId, ForkContext, - ForkName, LightClientBootstrap, LightClientBootstrapAltair, LightClientFinalityUpdate, - LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, + BeaconBlockFulu, BlobSidecar, ChainSpec, DataColumnSidecar, EmptyBlock, EthSpec, EthSpecId, + ForkContext, ForkName, LightClientBootstrap, LightClientBootstrapAltair, + LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientUpdate, MainnetEthSpec, MinimalEthSpec, Signature, SignedBeaconBlock, }; @@ -73,6 +73,15 @@ pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX_WITHOUT_PAYLOAD: LazyLock = La .len() }); +pub static SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD: LazyLock = LazyLock::new(|| { + SignedBeaconBlock::::from_block( + BeaconBlock::Fulu(BeaconBlockFulu::full(&MainnetEthSpec::default_spec())), + Signature::empty(), + ) + .as_ssz_bytes() + .len() +}); + /// The `BeaconBlockBellatrix` block has an `ExecutionPayload` field which has a max size ~16 GiB for future proofing. /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network @@ -105,6 +114,15 @@ pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX: LazyLock = LazyLock::new(|| { + ssz::BYTES_PER_LENGTH_OFFSET }); // Length offset for the blob commitments field. 
+pub static SIGNED_BEACON_BLOCK_FULU_MAX: LazyLock = LazyLock::new(|| { + *SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD + + types::ExecutionPayload::::max_execution_payload_fulu_size() + + ssz::BYTES_PER_LENGTH_OFFSET + + (::ssz_fixed_len() + * ::max_blobs_per_block()) + + ssz::BYTES_PER_LENGTH_OFFSET +}); + pub static BLOB_SIDECAR_SIZE: LazyLock = LazyLock::new(BlobSidecar::::max_size); @@ -209,6 +227,10 @@ pub fn rpc_block_limits_by_fork(current_fork: ForkName) -> RpcLimits { *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than altair and bellatrix blocks *SIGNED_BEACON_BLOCK_ELECTRA_MAX, // Electra block is larger than Deneb block ), + ForkName::Fulu => RpcLimits::new( + *SIGNED_BEACON_BLOCK_BASE_MIN, // Base block is smaller than all other blocks + *SIGNED_BEACON_BLOCK_FULU_MAX, // Fulu block is largest + ), } } @@ -226,7 +248,7 @@ fn rpc_light_client_updates_by_range_limits_by_fork(current_fork: ForkName) -> R ForkName::Deneb => { RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_DENEB_MAX) } - ForkName::Electra => { + ForkName::Electra | ForkName::Fulu => { RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_UPDATES_BY_RANGE_ELECTRA_MAX) } } @@ -246,7 +268,7 @@ fn rpc_light_client_finality_update_limits_by_fork(current_fork: ForkName) -> Rp ForkName::Deneb => { RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_FINALITY_UPDATE_DENEB_MAX) } - ForkName::Electra => { + ForkName::Electra | ForkName::Fulu => { RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_FINALITY_UPDATE_ELECTRA_MAX) } } @@ -267,7 +289,7 @@ fn rpc_light_client_optimistic_update_limits_by_fork(current_fork: ForkName) -> ForkName::Deneb => { RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_OPTIMISTIC_UPDATE_DENEB_MAX) } - ForkName::Electra => RpcLimits::new( + ForkName::Electra | ForkName::Fulu => RpcLimits::new( altair_fixed_len, *LIGHT_CLIENT_OPTIMISTIC_UPDATE_ELECTRA_MAX, ), @@ -284,7 +306,9 @@ fn rpc_light_client_bootstrap_limits_by_fork(current_fork: ForkName) -> RpcLimit } 
ForkName::Capella => RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_BOOTSTRAP_CAPELLA_MAX), ForkName::Deneb => RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_BOOTSTRAP_DENEB_MAX), - ForkName::Electra => RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_BOOTSTRAP_ELECTRA_MAX), + ForkName::Electra | ForkName::Fulu => { + RpcLimits::new(altair_fixed_len, *LIGHT_CLIENT_BOOTSTRAP_ELECTRA_MAX) + } } } diff --git a/beacon_node/lighthouse_network/src/types/pubsub.rs b/beacon_node/lighthouse_network/src/types/pubsub.rs index 9f68278e28..c976959470 100644 --- a/beacon_node/lighthouse_network/src/types/pubsub.rs +++ b/beacon_node/lighthouse_network/src/types/pubsub.rs @@ -13,8 +13,9 @@ use types::{ ProposerSlashing, SignedAggregateAndProof, SignedAggregateAndProofBase, SignedAggregateAndProofElectra, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, - SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBlsToExecutionChange, - SignedContributionAndProof, SignedVoluntaryExit, SubnetId, SyncCommitteeMessage, SyncSubnetId, + SignedBeaconBlockDeneb, SignedBeaconBlockElectra, SignedBeaconBlockFulu, + SignedBlsToExecutionChange, SignedContributionAndProof, SignedVoluntaryExit, SubnetId, + SyncCommitteeMessage, SyncSubnetId, }; #[derive(Debug, Clone, PartialEq)] @@ -242,6 +243,10 @@ impl PubsubMessage { SignedBeaconBlockElectra::from_ssz_bytes(data) .map_err(|e| format!("{:?}", e))?, ), + Some(ForkName::Fulu) => SignedBeaconBlock::::Fulu( + SignedBeaconBlockFulu::from_ssz_bytes(data) + .map_err(|e| format!("{:?}", e))?, + ), None => { return Err(format!( "Unknown gossipsub fork digest: {:?}", diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 174787f999..8cdecc6bfa 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -61,6 +61,7 @@ pub fn fork_core_topics(fork_name: 
&ForkName, spec: &ChainSpec) -> V deneb_topics } ForkName::Electra => vec![], + ForkName::Fulu => vec![], } } diff --git a/beacon_node/lighthouse_network/tests/common.rs b/beacon_node/lighthouse_network/tests/common.rs index 84e19c81d0..6a3ec6dd32 100644 --- a/beacon_node/lighthouse_network/tests/common.rs +++ b/beacon_node/lighthouse_network/tests/common.rs @@ -25,12 +25,14 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { let capella_fork_epoch = Epoch::new(3); let deneb_fork_epoch = Epoch::new(4); let electra_fork_epoch = Epoch::new(5); + let fulu_fork_epoch = Epoch::new(6); chain_spec.altair_fork_epoch = Some(altair_fork_epoch); chain_spec.bellatrix_fork_epoch = Some(bellatrix_fork_epoch); chain_spec.capella_fork_epoch = Some(capella_fork_epoch); chain_spec.deneb_fork_epoch = Some(deneb_fork_epoch); chain_spec.electra_fork_epoch = Some(electra_fork_epoch); + chain_spec.fulu_fork_epoch = Some(fulu_fork_epoch); let current_slot = match fork_name { ForkName::Base => Slot::new(0), @@ -39,6 +41,7 @@ pub fn fork_context(fork_name: ForkName) -> ForkContext { ForkName::Capella => capella_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Deneb => deneb_fork_epoch.start_slot(E::slots_per_epoch()), ForkName::Electra => electra_fork_epoch.start_slot(E::slots_per_epoch()), + ForkName::Fulu => fulu_fork_epoch.start_slot(E::slots_per_epoch()), }; ForkContext::new::(current_slot, Hash256::zero(), &chain_spec) } diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index 94aacad3e8..a43b3bd022 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -174,7 +174,7 @@ impl TestRig { } pub fn after_deneb(&self) -> bool { - matches!(self.fork_name, ForkName::Deneb | ForkName::Electra) + self.fork_name.deneb_enabled() } fn trigger_unknown_parent_block(&mut self, peer_id: PeerId, block: Arc>) { diff --git a/beacon_node/operation_pool/src/lib.rs 
b/beacon_node/operation_pool/src/lib.rs index d8183de752..835133a059 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -1239,14 +1239,11 @@ mod release_tests { let stats = op_pool.attestation_stats(); let fork_name = state.fork_name_unchecked(); - match fork_name { - ForkName::Electra => { - assert_eq!(stats.num_attestation_data, 1); - } - _ => { - assert_eq!(stats.num_attestation_data, committees.len()); - } - }; + if fork_name.electra_enabled() { + assert_eq!(stats.num_attestation_data, 1); + } else { + assert_eq!(stats.num_attestation_data, committees.len()); + } assert_eq!( stats.num_attestations, @@ -1258,25 +1255,19 @@ mod release_tests { let best_attestations = op_pool .get_attestations(&state, |_| true, |_| true, spec) .expect("should have best attestations"); - match fork_name { - ForkName::Electra => { - assert_eq!(best_attestations.len(), 8); - } - _ => { - assert_eq!(best_attestations.len(), max_attestations); - } - }; + if fork_name.electra_enabled() { + assert_eq!(best_attestations.len(), 8); + } else { + assert_eq!(best_attestations.len(), max_attestations); + } // All the best attestations should be signed by at least `big_step_size` (4) validators. 
for att in &best_attestations { - match fork_name { - ForkName::Electra => { - assert!(att.num_set_aggregation_bits() >= small_step_size); - } - _ => { - assert!(att.num_set_aggregation_bits() >= big_step_size); - } - }; + if fork_name.electra_enabled() { + assert!(att.num_set_aggregation_bits() >= small_step_size); + } else { + assert!(att.num_set_aggregation_bits() >= big_step_size); + } } } @@ -1357,17 +1348,14 @@ mod release_tests { let num_big = target_committee_size / big_step_size; let fork_name = state.fork_name_unchecked(); - match fork_name { - ForkName::Electra => { - assert_eq!(op_pool.attestation_stats().num_attestation_data, 1); - } - _ => { - assert_eq!( - op_pool.attestation_stats().num_attestation_data, - committees.len() - ); - } - }; + if fork_name.electra_enabled() { + assert_eq!(op_pool.attestation_stats().num_attestation_data, 1); + } else { + assert_eq!( + op_pool.attestation_stats().num_attestation_data, + committees.len() + ); + } assert_eq!( op_pool.num_attestations(), @@ -1380,14 +1368,11 @@ mod release_tests { .get_attestations(&state, |_| true, |_| true, spec) .expect("should have valid best attestations"); - match fork_name { - ForkName::Electra => { - assert_eq!(best_attestations.len(), 8); - } - _ => { - assert_eq!(best_attestations.len(), max_attestations); - } - }; + if fork_name.electra_enabled() { + assert_eq!(best_attestations.len(), 8); + } else { + assert_eq!(best_attestations.len(), max_attestations); + } let total_active_balance = state.get_total_active_balance().unwrap(); diff --git a/beacon_node/src/lib.rs b/beacon_node/src/lib.rs index cca617d8c6..0c4cbf0f57 100644 --- a/beacon_node/src/lib.rs +++ b/beacon_node/src/lib.rs @@ -238,6 +238,7 @@ mod test { spec.bellatrix_fork_epoch = Some(Epoch::new(256)); spec.deneb_fork_epoch = Some(Epoch::new(257)); spec.electra_fork_epoch = None; + spec.fulu_fork_epoch = None; let result = validator_fork_epochs(&spec); assert_eq!( result, diff --git 
a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs index 14fc10ad6d..5c60aa8d7e 100644 --- a/beacon_node/store/src/impls/execution_payload.rs +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -2,7 +2,7 @@ use crate::{DBColumn, Error, StoreItem}; use ssz::{Decode, Encode}; use types::{ BlobSidecarList, EthSpec, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, - ExecutionPayloadDeneb, ExecutionPayloadElectra, + ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, }; macro_rules! impl_store_item { @@ -26,6 +26,7 @@ impl_store_item!(ExecutionPayloadBellatrix); impl_store_item!(ExecutionPayloadCapella); impl_store_item!(ExecutionPayloadDeneb); impl_store_item!(ExecutionPayloadElectra); +impl_store_item!(ExecutionPayloadFulu); impl_store_item!(BlobSidecarList); /// This fork-agnostic implementation should be only used for writing. @@ -42,17 +43,21 @@ impl StoreItem for ExecutionPayload { } fn from_store_bytes(bytes: &[u8]) -> Result { - ExecutionPayloadElectra::from_ssz_bytes(bytes) - .map(Self::Electra) + ExecutionPayloadFulu::from_ssz_bytes(bytes) + .map(Self::Fulu) .or_else(|_| { - ExecutionPayloadDeneb::from_ssz_bytes(bytes) - .map(Self::Deneb) + ExecutionPayloadElectra::from_ssz_bytes(bytes) + .map(Self::Electra) .or_else(|_| { - ExecutionPayloadCapella::from_ssz_bytes(bytes) - .map(Self::Capella) + ExecutionPayloadDeneb::from_ssz_bytes(bytes) + .map(Self::Deneb) .or_else(|_| { - ExecutionPayloadBellatrix::from_ssz_bytes(bytes) - .map(Self::Bellatrix) + ExecutionPayloadCapella::from_ssz_bytes(bytes) + .map(Self::Capella) + .or_else(|_| { + ExecutionPayloadBellatrix::from_ssz_bytes(bytes) + .map(Self::Bellatrix) + }) }) }) }) diff --git a/beacon_node/store/src/partial_beacon_state.rs b/beacon_node/store/src/partial_beacon_state.rs index 22eecdcc60..0b8bc2e0d4 100644 --- a/beacon_node/store/src/partial_beacon_state.rs +++ b/beacon_node/store/src/partial_beacon_state.rs @@ 
-16,7 +16,7 @@ use types::*; /// /// This can be deleted once schema versions prior to V22 are no longer supported. #[superstruct( - variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes(derive(Debug, PartialEq, Clone, Encode, Decode)) )] #[derive(Debug, PartialEq, Clone, Encode)] @@ -68,9 +68,9 @@ where pub current_epoch_attestations: List, E::MaxPendingAttestations>, // Participation (Altair and later) - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] pub previous_epoch_participation: List, - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] pub current_epoch_participation: List, // Finality @@ -80,13 +80,13 @@ where pub finalized_checkpoint: Checkpoint, // Inactivity - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] pub inactivity_scores: List, // Light-client sync committees - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] pub next_sync_committee: Arc>, // Execution @@ -110,37 +110,42 @@ where partial_getter(rename = "latest_execution_payload_header_electra") )] pub latest_execution_payload_header: ExecutionPayloadHeaderElectra, + #[superstruct( + only(Fulu), + partial_getter(rename = "latest_execution_payload_header_fulu") + )] + pub latest_execution_payload_header: ExecutionPayloadHeaderFulu, // Capella - #[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub next_withdrawal_index: u64, - 
#[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub next_withdrawal_validator_index: u64, #[ssz(skip_serializing, skip_deserializing)] - #[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub historical_summaries: Option>, // Electra - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub deposit_requests_start_index: u64, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub deposit_balance_to_consume: u64, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub exit_balance_to_consume: u64, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub earliest_exit_epoch: Epoch, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub consolidation_balance_to_consume: u64, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub earliest_consolidation_epoch: Epoch, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub pending_deposits: List, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub pending_partial_withdrawals: List, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub pending_consolidations: List, } @@ -409,6 +414,31 @@ impl TryInto> for PartialBeaconState { ], [historical_summaries] ), + PartialBeaconState::Fulu(inner) => impl_try_into_beacon_state!( + inner, + Fulu, + BeaconStateFulu, + [ + previous_epoch_participation, + current_epoch_participation, + current_sync_committee, + next_sync_committee, + inactivity_scores, + latest_execution_payload_header, + next_withdrawal_index, + next_withdrawal_validator_index, + deposit_requests_start_index, + deposit_balance_to_consume, + exit_balance_to_consume, + earliest_exit_epoch, + consolidation_balance_to_consume, + earliest_consolidation_epoch, + pending_deposits, + pending_partial_withdrawals, + pending_consolidations + ], + 
[historical_summaries] + ), }; Ok(state) } diff --git a/common/eth2/src/types.rs b/common/eth2/src/types.rs index a303953a86..695d536944 100644 --- a/common/eth2/src/types.rs +++ b/common/eth2/src/types.rs @@ -1078,6 +1078,9 @@ impl ForkVersionDeserialize for SsePayloadAttributes { ForkName::Electra => serde_json::from_value(value) .map(Self::V3) .map_err(serde::de::Error::custom), + ForkName::Fulu => serde_json::from_value(value) + .map(Self::V3) + .map_err(serde::de::Error::custom), ForkName::Base | ForkName::Altair => Err(serde::de::Error::custom(format!( "SsePayloadAttributes deserialization for {fork_name} not implemented" ))), @@ -1861,14 +1864,10 @@ impl PublishBlockRequest { impl TryFrom>> for PublishBlockRequest { type Error = &'static str; fn try_from(block: Arc>) -> Result { - match *block { - SignedBeaconBlock::Base(_) - | SignedBeaconBlock::Altair(_) - | SignedBeaconBlock::Bellatrix(_) - | SignedBeaconBlock::Capella(_) => Ok(PublishBlockRequest::Block(block)), - SignedBeaconBlock::Deneb(_) | SignedBeaconBlock::Electra(_) => Err( - "post-Deneb block contents cannot be fully constructed from just the signed block", - ), + if block.message().fork_name_unchecked().deneb_enabled() { + Err("post-Deneb block contents cannot be fully constructed from just the signed block") + } else { + Ok(PublishBlockRequest::Block(block)) } } } @@ -1972,16 +1971,18 @@ impl ForkVersionDeserialize for FullPayloadContents { value: Value, fork_name: ForkName, ) -> Result { - match fork_name { - ForkName::Bellatrix | ForkName::Capella => serde_json::from_value(value) - .map(Self::Payload) - .map_err(serde::de::Error::custom), - ForkName::Deneb | ForkName::Electra => serde_json::from_value(value) + if fork_name.deneb_enabled() { + serde_json::from_value(value) .map(Self::PayloadAndBlobs) - .map_err(serde::de::Error::custom), - ForkName::Base | ForkName::Altair => Err(serde::de::Error::custom(format!( + .map_err(serde::de::Error::custom) + } else if fork_name.bellatrix_enabled() { 
+ serde_json::from_value(value) + .map(Self::Payload) + .map_err(serde::de::Error::custom) + } else { + Err(serde::de::Error::custom(format!( "FullPayloadContents deserialization for {fork_name} not implemented" - ))), + ))) } } } diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index 1eca01bbee..a107f6147a 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -15,7 +15,6 @@ TERMINAL_TOTAL_DIFFICULTY: 231707791542740786049188744689299064356246512 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 - # Genesis # --------------------------------------------------------------- # *CUSTOM @@ -27,7 +26,6 @@ GENESIS_FORK_VERSION: 0x0000006f # *CUSTOM GENESIS_DELAY: 300 - # Forking # --------------------------------------------------------------- # Some forks are disabled for now: @@ -49,6 +47,9 @@ DENEB_FORK_EPOCH: 516608 # Wed Jan 31 2024 18:15:40 GMT+0000 # Electra ELECTRA_FORK_VERSION: 0x0500006f ELECTRA_FORK_EPOCH: 18446744073709551615 +# Fulu +FULU_FORK_VERSION: 0x0600006f +FULU_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- @@ -63,7 +64,6 @@ SHARD_COMMITTEE_PERIOD: 256 # 2**10 (= 1024) ~1.4 hour ETH1_FOLLOW_DISTANCE: 1024 - # Validator cycle # --------------------------------------------------------------- # 2**2 (= 4) @@ -90,7 +90,6 @@ REORG_PARENT_WEIGHT_THRESHOLD: 160 # `2` epochs REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 - # Deposit contract # --------------------------------------------------------------- # xDai Mainnet @@ -141,4 +140,4 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 -SAMPLES_PER_SLOT: 8 \ No newline at 
end of file +SAMPLES_PER_SLOT: 8 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index 500555a269..f71984059a 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -23,7 +23,6 @@ GENESIS_FORK_VERSION: 0x00000064 # 6000 seconds (100 minutes) GENESIS_DELAY: 6000 - # Forking # --------------------------------------------------------------- # Some forks are disabled for now: @@ -45,7 +44,9 @@ DENEB_FORK_EPOCH: 889856 # 2024-03-11T18:30:20.000Z # Electra ELECTRA_FORK_VERSION: 0x05000064 ELECTRA_FORK_EPOCH: 18446744073709551615 - +# Fulu +FULU_FORK_VERSION: 0x06000064 +FULU_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- @@ -60,7 +61,6 @@ SHARD_COMMITTEE_PERIOD: 256 # 2**10 (= 1024) ~1.4 hour ETH1_FOLLOW_DISTANCE: 1024 - # Validator cycle # --------------------------------------------------------------- # 2**2 (= 4) @@ -76,7 +76,6 @@ MAX_PER_EPOCH_ACTIVATION_CHURN_LIMIT: 2 # 2**12 (= 4096) CHURN_LIMIT_QUOTIENT: 4096 - # Fork choice # --------------------------------------------------------------- # 40% @@ -124,4 +123,4 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 -SAMPLES_PER_SLOT: 8 \ No newline at end of file +SAMPLES_PER_SLOT: 8 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index d67d77d3be..6d344b5b52 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -12,7 +12,6 @@ GENESIS_FORK_VERSION: 0x01017000 # Genesis delay 5 mins GENESIS_DELAY: 300 - # Forking # 
--------------------------------------------------------------- # Some forks are disabled for now: @@ -37,6 +36,9 @@ DENEB_FORK_EPOCH: 29696 # Electra ELECTRA_FORK_VERSION: 0x06017000 ELECTRA_FORK_EPOCH: 18446744073709551615 +# Fulu +FULU_FORK_VERSION: 0x07017000 +FULU_FORK_EPOCH: 18446744073709551615 # Time parameters # --------------------------------------------------------------- @@ -51,7 +53,6 @@ SHARD_COMMITTEE_PERIOD: 256 # 2**11 (= 2,048) Eth1 blocks ~8 hours ETH1_FOLLOW_DISTANCE: 2048 - # Validator cycle # --------------------------------------------------------------- # 2**2 (= 4) @@ -128,4 +129,4 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 -SAMPLES_PER_SLOT: 8 \ No newline at end of file +SAMPLES_PER_SLOT: 8 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 18591fecdc..244ddd564d 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -18,8 +18,6 @@ TERMINAL_TOTAL_DIFFICULTY: 58750000000000000000000 TERMINAL_BLOCK_HASH: 0x0000000000000000000000000000000000000000000000000000000000000000 TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH: 18446744073709551615 - - # Genesis # --------------------------------------------------------------- # `2**14` (= 16,384) @@ -31,7 +29,6 @@ GENESIS_FORK_VERSION: 0x00000000 # 604800 seconds (7 days) GENESIS_DELAY: 604800 - # Forking # --------------------------------------------------------------- # Some forks are disabled for now: @@ -40,23 +37,25 @@ GENESIS_DELAY: 604800 # Altair ALTAIR_FORK_VERSION: 0x01000000 -ALTAIR_FORK_EPOCH: 74240 # Oct 27, 2021, 10:56:23am UTC +ALTAIR_FORK_EPOCH: 74240 # Oct 27, 2021, 10:56:23am UTC # Bellatrix BELLATRIX_FORK_VERSION: 0x02000000 -BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC 
+BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC # Capella CAPELLA_FORK_VERSION: 0x03000000 -CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC +CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC # Deneb DENEB_FORK_VERSION: 0x04000000 -DENEB_FORK_EPOCH: 269568 # March 13, 2024, 01:55:35pm UTC +DENEB_FORK_EPOCH: 269568 # March 13, 2024, 01:55:35pm UTC # Electra ELECTRA_FORK_VERSION: 0x05000000 ELECTRA_FORK_EPOCH: 18446744073709551615 +# Fulu +FULU_FORK_VERSION: 0x06000000 +FULU_FORK_EPOCH: 18446744073709551615 # PeerDAS EIP7594_FORK_EPOCH: 18446744073709551615 - # Time parameters # --------------------------------------------------------------- # 12 seconds @@ -70,7 +69,6 @@ SHARD_COMMITTEE_PERIOD: 256 # 2**11 (= 2,048) Eth1 blocks ~8 hours ETH1_FOLLOW_DISTANCE: 2048 - # Validator cycle # --------------------------------------------------------------- # 2**2 (= 4) @@ -97,7 +95,6 @@ REORG_PARENT_WEIGHT_THRESHOLD: 160 # `2` epochs REORG_MAX_EPOCHS_SINCE_FINALIZATION: 2 - # Deposit contract # --------------------------------------------------------------- # Ethereum PoW Mainnet @@ -105,7 +102,6 @@ DEPOSIT_CHAIN_ID: 1 DEPOSIT_NETWORK_ID: 1 DEPOSIT_CONTRACT_ADDRESS: 0x00000000219ab540356cBB839Cbe05303d7705Fa - # Networking # --------------------------------------------------------------- # `10 * 2**20` (= 10485760, 10 MiB) @@ -150,4 +146,4 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 CUSTODY_REQUIREMENT: 4 DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 -SAMPLES_PER_SLOT: 8 \ No newline at end of file +SAMPLES_PER_SLOT: 8 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index b08a6180bf..88f8359bd1 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -124,4 +124,4 @@ BLOB_SIDECAR_SUBNET_COUNT: 6 CUSTODY_REQUIREMENT: 4 
DATA_COLUMN_SIDECAR_SUBNET_COUNT: 128 NUMBER_OF_COLUMNS: 128 -SAMPLES_PER_SLOT: 8 \ No newline at end of file +SAMPLES_PER_SLOT: 8 diff --git a/consensus/fork_choice/src/fork_choice.rs b/consensus/fork_choice/src/fork_choice.rs index 85704042df..4c25be950b 100644 --- a/consensus/fork_choice/src/fork_choice.rs +++ b/consensus/fork_choice/src/fork_choice.rs @@ -755,20 +755,15 @@ where if let Some((parent_justified, parent_finalized)) = parent_checkpoints { (parent_justified, parent_finalized) } else { - let justification_and_finalization_state = match block { - BeaconBlockRef::Electra(_) - | BeaconBlockRef::Deneb(_) - | BeaconBlockRef::Capella(_) - | BeaconBlockRef::Bellatrix(_) - | BeaconBlockRef::Altair(_) => { + let justification_and_finalization_state = + if block.fork_name_unchecked().altair_enabled() { // NOTE: Processing justification & finalization requires the progressive // balances cache, but we cannot initialize it here as we only have an // immutable reference. The state *should* have come straight from block // processing, which initialises the cache, but if we add other `on_block` // calls in future it could be worth passing a mutable reference. per_epoch_processing::altair::process_justification_and_finalization(state)? - } - BeaconBlockRef::Base(_) => { + } else { let mut validator_statuses = per_epoch_processing::base::ValidatorStatuses::new(state, spec) .map_err(Error::ValidatorStatuses)?; @@ -780,8 +775,7 @@ where &validator_statuses.total_balances, spec, )? - } - }; + }; ( justification_and_finalization_state.current_justified_checkpoint(), diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index ef017159a0..001b80fe11 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -1263,7 +1263,7 @@ async fn progressive_balances_cache_proposer_slashing() { // (`HeaderInvalid::ProposerSlashed`). 
The harness should be re-worked to successfully skip // the slot in this scenario rather than panic-ing. The same applies to // `progressive_balances_cache_attester_slashing`. - .apply_blocks(2) + .apply_blocks(1) .await .add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch()) .await diff --git a/consensus/state_processing/src/common/get_attestation_participation.rs b/consensus/state_processing/src/common/get_attestation_participation.rs index fc09dad1f4..2c6fd3b215 100644 --- a/consensus/state_processing/src/common/get_attestation_participation.rs +++ b/consensus/state_processing/src/common/get_attestation_participation.rs @@ -44,22 +44,15 @@ pub fn get_attestation_participation_flag_indices( if is_matching_source && inclusion_delay <= E::slots_per_epoch().integer_sqrt() { participation_flag_indices.push(TIMELY_SOURCE_FLAG_INDEX); } - match state { - &BeaconState::Base(_) - | &BeaconState::Altair(_) - | &BeaconState::Bellatrix(_) - | &BeaconState::Capella(_) => { - if is_matching_target && inclusion_delay <= E::slots_per_epoch() { - participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); - } - } - &BeaconState::Deneb(_) | &BeaconState::Electra(_) => { - if is_matching_target { - // [Modified in Deneb:EIP7045] - participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); - } + if state.fork_name_unchecked().deneb_enabled() { + if is_matching_target { + // [Modified in Deneb:EIP7045] + participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); } + } else if is_matching_target && inclusion_delay <= E::slots_per_epoch() { + participation_flag_indices.push(TIMELY_TARGET_FLAG_INDEX); } + if is_matching_head && inclusion_delay == spec.min_attestation_inclusion_delay { participation_flag_indices.push(TIMELY_HEAD_FLAG_INDEX); } diff --git a/consensus/state_processing/src/common/slash_validator.rs b/consensus/state_processing/src/common/slash_validator.rs index 80d857cc00..bd60f16014 100644 --- a/consensus/state_processing/src/common/slash_validator.rs +++ 
b/consensus/state_processing/src/common/slash_validator.rs @@ -55,15 +55,12 @@ pub fn slash_validator( let whistleblower_index = opt_whistleblower_index.unwrap_or(proposer_index); let whistleblower_reward = validator_effective_balance .safe_div(spec.whistleblower_reward_quotient_for_state(state))?; - let proposer_reward = match state { - BeaconState::Base(_) => whistleblower_reward.safe_div(spec.proposer_reward_quotient)?, - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => whistleblower_reward + let proposer_reward = if state.fork_name_unchecked().altair_enabled() { + whistleblower_reward .safe_mul(PROPOSER_WEIGHT)? - .safe_div(WEIGHT_DENOMINATOR)?, + .safe_div(WEIGHT_DENOMINATOR)? + } else { + whistleblower_reward.safe_div(spec.proposer_reward_quotient)? }; // Ensure the whistleblower index is in the validator registry. diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index ccff3d80c0..10723ecc51 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -4,7 +4,7 @@ use super::per_block_processing::{ use crate::common::DepositDataTree; use crate::upgrade::electra::upgrade_state_to_electra; use crate::upgrade::{ - upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, + upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, upgrade_to_fulu, }; use safe_arith::{ArithError, SafeArith}; use std::sync::Arc; @@ -135,11 +135,27 @@ pub fn initialize_beacon_state_from_eth1( // Override latest execution payload header. // See https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#testing - if let Some(ExecutionPayloadHeader::Electra(header)) = execution_payload_header { + if let Some(ExecutionPayloadHeader::Electra(ref header)) = execution_payload_header { *state.latest_execution_payload_header_electra_mut()? 
= header.clone(); } } + // Upgrade to fulu if configured from genesis. + if spec + .fulu_fork_epoch + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) + { + upgrade_to_fulu(&mut state, spec)?; + + // Remove intermediate Electra fork from `state.fork`. + state.fork_mut().previous_version = spec.fulu_fork_version; + + // Override latest execution payload header. + if let Some(ExecutionPayloadHeader::Fulu(header)) = execution_payload_header { + *state.latest_execution_payload_header_fulu_mut()? = header.clone(); + } + } + // Now that we have our validators, initialize the caches (including the committees) state.build_caches(spec)?; diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 436f4934b9..22e0a5eab3 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -442,6 +442,12 @@ pub fn process_execution_payload>( _ => return Err(BlockProcessingError::IncorrectStateType), } } + ExecutionPayloadHeaderRefMut::Fulu(header_mut) => { + match payload.to_execution_payload_header() { + ExecutionPayloadHeader::Fulu(header) => *header_mut = header, + _ => return Err(BlockProcessingError::IncorrectStateType), + } + } } Ok(()) @@ -453,15 +459,17 @@ pub fn process_execution_payload>( /// repeatedly write code to treat these errors as false. /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_complete pub fn is_merge_transition_complete(state: &BeaconState) -> bool { - match state { + if state.fork_name_unchecked().capella_enabled() { + true + } else if state.fork_name_unchecked().bellatrix_enabled() { // We must check defaultness against the payload header with 0x0 roots, as that's what's meant // by `ExecutionPayloadHeader()` in the spec. 
- BeaconState::Bellatrix(_) => state + state .latest_execution_payload_header() .map(|header| !header.is_default_with_zero_roots()) - .unwrap_or(false), - BeaconState::Electra(_) | BeaconState::Deneb(_) | BeaconState::Capella(_) => true, - BeaconState::Base(_) | BeaconState::Altair(_) => false, + .unwrap_or(false) + } else { + false } } /// https://github.com/ethereum/consensus-specs/blob/dev/specs/bellatrix/beacon-chain.md#is_merge_transition_block @@ -603,66 +611,65 @@ pub fn process_withdrawals>( payload: Payload::Ref<'_>, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - match state { - BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => { - let (expected_withdrawals, partial_withdrawals_count) = - get_expected_withdrawals(state, spec)?; - let expected_root = expected_withdrawals.tree_hash_root(); - let withdrawals_root = payload.withdrawals_root()?; - - if expected_root != withdrawals_root { - return Err(BlockProcessingError::WithdrawalsRootMismatch { - expected: expected_root, - found: withdrawals_root, - }); - } + if state.fork_name_unchecked().capella_enabled() { + let (expected_withdrawals, partial_withdrawals_count) = + get_expected_withdrawals(state, spec)?; + let expected_root = expected_withdrawals.tree_hash_root(); + let withdrawals_root = payload.withdrawals_root()?; + + if expected_root != withdrawals_root { + return Err(BlockProcessingError::WithdrawalsRootMismatch { + expected: expected_root, + found: withdrawals_root, + }); + } - for withdrawal in expected_withdrawals.iter() { - decrease_balance( - state, - withdrawal.validator_index as usize, - withdrawal.amount, - )?; - } + for withdrawal in expected_withdrawals.iter() { + decrease_balance( + state, + withdrawal.validator_index as usize, + withdrawal.amount, + )?; + } - // Update pending partial withdrawals [New in Electra:EIP7251] - if let Some(partial_withdrawals_count) = partial_withdrawals_count { - // TODO(electra): Use efficient pop_front after milhouse 
release https://github.com/sigp/milhouse/pull/38 - let new_partial_withdrawals = state - .pending_partial_withdrawals()? - .iter_from(partial_withdrawals_count)? - .cloned() - .collect::>(); - *state.pending_partial_withdrawals_mut()? = List::new(new_partial_withdrawals)?; - } + // Update pending partial withdrawals [New in Electra:EIP7251] + if let Some(partial_withdrawals_count) = partial_withdrawals_count { + // TODO(electra): Use efficient pop_front after milhouse release https://github.com/sigp/milhouse/pull/38 + let new_partial_withdrawals = state + .pending_partial_withdrawals()? + .iter_from(partial_withdrawals_count)? + .cloned() + .collect::>(); + *state.pending_partial_withdrawals_mut()? = List::new(new_partial_withdrawals)?; + } - // Update the next withdrawal index if this block contained withdrawals - if let Some(latest_withdrawal) = expected_withdrawals.last() { - *state.next_withdrawal_index_mut()? = latest_withdrawal.index.safe_add(1)?; - - // Update the next validator index to start the next withdrawal sweep - if expected_withdrawals.len() == E::max_withdrawals_per_payload() { - // Next sweep starts after the latest withdrawal's validator index - let next_validator_index = latest_withdrawal - .validator_index - .safe_add(1)? - .safe_rem(state.validators().len() as u64)?; - *state.next_withdrawal_validator_index_mut()? = next_validator_index; - } - } + // Update the next withdrawal index if this block contained withdrawals + if let Some(latest_withdrawal) = expected_withdrawals.last() { + *state.next_withdrawal_index_mut()? = latest_withdrawal.index.safe_add(1)?; - // Advance sweep by the max length of the sweep if there was not a full set of withdrawals - if expected_withdrawals.len() != E::max_withdrawals_per_payload() { - let next_validator_index = state - .next_withdrawal_validator_index()? - .safe_add(spec.max_validators_per_withdrawals_sweep)? 
+ // Update the next validator index to start the next withdrawal sweep + if expected_withdrawals.len() == E::max_withdrawals_per_payload() { + // Next sweep starts after the latest withdrawal's validator index + let next_validator_index = latest_withdrawal + .validator_index + .safe_add(1)? .safe_rem(state.validators().len() as u64)?; *state.next_withdrawal_validator_index_mut()? = next_validator_index; } + } - Ok(()) + // Advance sweep by the max length of the sweep if there was not a full set of withdrawals + if expected_withdrawals.len() != E::max_withdrawals_per_payload() { + let next_validator_index = state + .next_withdrawal_validator_index()? + .safe_add(spec.max_validators_per_withdrawals_sweep)? + .safe_rem(state.validators().len() as u64)?; + *state.next_withdrawal_validator_index_mut()? = next_validator_index; } + + Ok(()) + } else { // these shouldn't even be encountered but they're here for completeness - BeaconState::Base(_) | BeaconState::Altair(_) | BeaconState::Bellatrix(_) => Ok(()), + Ok(()) } } diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 22d8592364..4977f7c7e9 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -284,29 +284,22 @@ pub fn process_attestations>( ctxt: &mut ConsensusContext, spec: &ChainSpec, ) -> Result<(), BlockProcessingError> { - match block_body { - BeaconBlockBodyRef::Base(_) => { - base::process_attestations( - state, - block_body.attestations(), - verify_signatures, - ctxt, - spec, - )?; - } - BeaconBlockBodyRef::Altair(_) - | BeaconBlockBodyRef::Bellatrix(_) - | BeaconBlockBodyRef::Capella(_) - | BeaconBlockBodyRef::Deneb(_) - | BeaconBlockBodyRef::Electra(_) => { - altair_deneb::process_attestations( - state, - block_body.attestations(), - verify_signatures, - ctxt, - spec, - )?; - } 
+ if state.fork_name_unchecked().altair_enabled() { + altair_deneb::process_attestations( + state, + block_body.attestations(), + verify_signatures, + ctxt, + spec, + )?; + } else { + base::process_attestations( + state, + block_body.attestations(), + verify_signatures, + ctxt, + spec, + )?; } Ok(()) } diff --git a/consensus/state_processing/src/per_block_processing/signature_sets.rs b/consensus/state_processing/src/per_block_processing/signature_sets.rs index 2e00ee0341..39f438f97f 100644 --- a/consensus/state_processing/src/per_block_processing/signature_sets.rs +++ b/consensus/state_processing/src/per_block_processing/signature_sets.rs @@ -387,22 +387,20 @@ where let exit = &signed_exit.message; let proposer_index = exit.validator_index as usize; - let domain = match state { - BeaconState::Base(_) - | BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) => spec.get_domain( - exit.epoch, - Domain::VoluntaryExit, - &state.fork(), - state.genesis_validators_root(), - ), + let domain = if state.fork_name_unchecked().deneb_enabled() { // EIP-7044 - BeaconState::Deneb(_) | BeaconState::Electra(_) => spec.compute_domain( + spec.compute_domain( Domain::VoluntaryExit, spec.capella_fork_version, state.genesis_validators_root(), - ), + ) + } else { + spec.get_domain( + exit.epoch, + Domain::VoluntaryExit, + &state.fork(), + state.genesis_validators_root(), + ) }; let message = exit.signing_root(domain); diff --git a/consensus/state_processing/src/per_block_processing/verify_attestation.rs b/consensus/state_processing/src/per_block_processing/verify_attestation.rs index 6bfb51d475..0b399bea6c 100644 --- a/consensus/state_processing/src/per_block_processing/verify_attestation.rs +++ b/consensus/state_processing/src/per_block_processing/verify_attestation.rs @@ -32,21 +32,16 @@ pub fn verify_attestation_for_block_inclusion<'ctxt, E: EthSpec>( attestation: data.slot, } ); - match state { - BeaconState::Base(_) - | BeaconState::Altair(_) - | 
BeaconState::Bellatrix(_) - | BeaconState::Capella(_) => { - verify!( - state.slot() <= data.slot.safe_add(E::slots_per_epoch())?, - Invalid::IncludedTooLate { - state: state.slot(), - attestation: data.slot, - } - ); - } + if state.fork_name_unchecked().deneb_enabled() { // [Modified in Deneb:EIP7045] - BeaconState::Deneb(_) | BeaconState::Electra(_) => {} + } else { + verify!( + state.slot() <= data.slot.safe_add(E::slots_per_epoch())?, + Invalid::IncludedTooLate { + state: state.slot(), + attestation: data.slot, + } + ); } verify_attestation_for_state(state, attestation, ctxt, verify_signatures, spec) diff --git a/consensus/state_processing/src/per_epoch_processing.rs b/consensus/state_processing/src/per_epoch_processing.rs index 55e8853f3f..41c30c4931 100644 --- a/consensus/state_processing/src/per_epoch_processing.rs +++ b/consensus/state_processing/src/per_epoch_processing.rs @@ -41,13 +41,10 @@ pub fn process_epoch( .fork_name(spec) .map_err(Error::InconsistentStateFork)?; - match state { - BeaconState::Base(_) => base::process_epoch(state, spec), - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => altair::process_epoch(state, spec), + if state.fork_name_unchecked().altair_enabled() { + altair::process_epoch(state, spec) + } else { + base::process_epoch(state, spec) } } diff --git a/consensus/state_processing/src/per_slot_processing.rs b/consensus/state_processing/src/per_slot_processing.rs index 6554423199..af1cce602c 100644 --- a/consensus/state_processing/src/per_slot_processing.rs +++ b/consensus/state_processing/src/per_slot_processing.rs @@ -1,6 +1,6 @@ use crate::upgrade::{ upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, - upgrade_to_electra, + upgrade_to_electra, upgrade_to_fulu, }; use crate::{per_epoch_processing::EpochProcessingSummary, *}; use safe_arith::{ArithError, SafeArith}; @@ -71,6 +71,11 @@ pub fn per_slot_processing( 
upgrade_to_electra(state, spec)?; } + // Fulu. + if spec.fulu_fork_epoch == Some(state.current_epoch()) { + upgrade_to_fulu(state, spec)?; + } + // Additionally build all caches so that all valid states that are advanced always have // committee caches built, and we don't have to worry about initialising them at higher // layers. diff --git a/consensus/state_processing/src/upgrade.rs b/consensus/state_processing/src/upgrade.rs index 93cafa73d0..88bc87849f 100644 --- a/consensus/state_processing/src/upgrade.rs +++ b/consensus/state_processing/src/upgrade.rs @@ -3,9 +3,11 @@ pub mod bellatrix; pub mod capella; pub mod deneb; pub mod electra; +pub mod fulu; pub use altair::upgrade_to_altair; pub use bellatrix::upgrade_to_bellatrix; pub use capella::upgrade_to_capella; pub use deneb::upgrade_to_deneb; pub use electra::upgrade_to_electra; +pub use fulu::upgrade_to_fulu; diff --git a/consensus/state_processing/src/upgrade/fulu.rs b/consensus/state_processing/src/upgrade/fulu.rs new file mode 100644 index 0000000000..6e0cd3fa9d --- /dev/null +++ b/consensus/state_processing/src/upgrade/fulu.rs @@ -0,0 +1,94 @@ +use std::mem; +use types::{BeaconState, BeaconStateError as Error, BeaconStateFulu, ChainSpec, EthSpec, Fork}; + +/// Transform a `Electra` state into an `Fulu` state. +pub fn upgrade_to_fulu( + pre_state: &mut BeaconState, + spec: &ChainSpec, +) -> Result<(), Error> { + let _epoch = pre_state.current_epoch(); + + let post = upgrade_state_to_fulu(pre_state, spec)?; + + *pre_state = post; + + Ok(()) +} + +pub fn upgrade_state_to_fulu( + pre_state: &mut BeaconState, + spec: &ChainSpec, +) -> Result, Error> { + let epoch = pre_state.current_epoch(); + let pre = pre_state.as_electra_mut()?; + // Where possible, use something like `mem::take` to move fields from behind the &mut + // reference. For other fields that don't have a good default value, use `clone`. 
+ // + // Fixed size vectors get cloned because replacing them would require the same size + // allocation as cloning. + let post = BeaconState::Fulu(BeaconStateFulu { + // Versioning + genesis_time: pre.genesis_time, + genesis_validators_root: pre.genesis_validators_root, + slot: pre.slot, + fork: Fork { + previous_version: pre.fork.current_version, + current_version: spec.fulu_fork_version, + epoch, + }, + // History + latest_block_header: pre.latest_block_header.clone(), + block_roots: pre.block_roots.clone(), + state_roots: pre.state_roots.clone(), + historical_roots: mem::take(&mut pre.historical_roots), + // Eth1 + eth1_data: pre.eth1_data.clone(), + eth1_data_votes: mem::take(&mut pre.eth1_data_votes), + eth1_deposit_index: pre.eth1_deposit_index, + // Registry + validators: mem::take(&mut pre.validators), + balances: mem::take(&mut pre.balances), + // Randomness + randao_mixes: pre.randao_mixes.clone(), + // Slashings + slashings: pre.slashings.clone(), + // `Participation + previous_epoch_participation: mem::take(&mut pre.previous_epoch_participation), + current_epoch_participation: mem::take(&mut pre.current_epoch_participation), + // Finality + justification_bits: pre.justification_bits.clone(), + previous_justified_checkpoint: pre.previous_justified_checkpoint, + current_justified_checkpoint: pre.current_justified_checkpoint, + finalized_checkpoint: pre.finalized_checkpoint, + // Inactivity + inactivity_scores: mem::take(&mut pre.inactivity_scores), + // Sync committees + current_sync_committee: pre.current_sync_committee.clone(), + next_sync_committee: pre.next_sync_committee.clone(), + // Execution + latest_execution_payload_header: pre.latest_execution_payload_header.upgrade_to_fulu(), + // Capella + next_withdrawal_index: pre.next_withdrawal_index, + next_withdrawal_validator_index: pre.next_withdrawal_validator_index, + historical_summaries: pre.historical_summaries.clone(), + // Electra + deposit_requests_start_index: 
pre.deposit_requests_start_index, + deposit_balance_to_consume: pre.deposit_balance_to_consume, + exit_balance_to_consume: pre.exit_balance_to_consume, + earliest_exit_epoch: pre.earliest_exit_epoch, + consolidation_balance_to_consume: pre.consolidation_balance_to_consume, + earliest_consolidation_epoch: pre.earliest_consolidation_epoch, + pending_deposits: pre.pending_deposits.clone(), + pending_partial_withdrawals: pre.pending_partial_withdrawals.clone(), + pending_consolidations: pre.pending_consolidations.clone(), + // Caches + total_active_balance: pre.total_active_balance, + progressive_balances_cache: mem::take(&mut pre.progressive_balances_cache), + committee_caches: mem::take(&mut pre.committee_caches), + pubkey_cache: mem::take(&mut pre.pubkey_cache), + exit_cache: mem::take(&mut pre.exit_cache), + slashings_cache: mem::take(&mut pre.slashings_cache), + epoch_cache: mem::take(&mut pre.epoch_cache), + }); + Ok(post) +} diff --git a/consensus/types/presets/gnosis/fulu.yaml b/consensus/types/presets/gnosis/fulu.yaml new file mode 100644 index 0000000000..35a7c98fbf --- /dev/null +++ b/consensus/types/presets/gnosis/fulu.yaml @@ -0,0 +1,3 @@ +# Gnosis preset - Fulu + +FULU_PLACEHOLDER: 0 diff --git a/consensus/types/presets/mainnet/fulu.yaml b/consensus/types/presets/mainnet/fulu.yaml new file mode 100644 index 0000000000..8aa9ccdcc3 --- /dev/null +++ b/consensus/types/presets/mainnet/fulu.yaml @@ -0,0 +1,3 @@ +# Mainnet preset - Fulu + +FULU_PLACEHOLDER: 0 diff --git a/consensus/types/presets/minimal/fulu.yaml b/consensus/types/presets/minimal/fulu.yaml new file mode 100644 index 0000000000..121c9858f4 --- /dev/null +++ b/consensus/types/presets/minimal/fulu.yaml @@ -0,0 +1,3 @@ +# Minimal preset - Fulu + +FULU_PLACEHOLDER: 0 diff --git a/consensus/types/src/beacon_block.rs b/consensus/types/src/beacon_block.rs index 801b7dd1c7..d72550aa12 100644 --- a/consensus/types/src/beacon_block.rs +++ b/consensus/types/src/beacon_block.rs @@ -16,7 +16,7 @@ use 
self::indexed_attestation::{IndexedAttestationBase, IndexedAttestationElectr /// A block of the `BeaconChain`. #[superstruct( - variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -75,6 +75,8 @@ pub struct BeaconBlock = FullPayload pub body: BeaconBlockBodyDeneb, #[superstruct(only(Electra), partial_getter(rename = "body_electra"))] pub body: BeaconBlockBodyElectra, + #[superstruct(only(Fulu), partial_getter(rename = "body_fulu"))] + pub body: BeaconBlockBodyFulu, } pub type BlindedBeaconBlock = BeaconBlock>; @@ -127,8 +129,9 @@ impl> BeaconBlock { /// Usually it's better to prefer `from_ssz_bytes` which will decode the correct variant based /// on the fork slot. pub fn any_from_ssz_bytes(bytes: &[u8]) -> Result { - BeaconBlockElectra::from_ssz_bytes(bytes) - .map(BeaconBlock::Electra) + BeaconBlockFulu::from_ssz_bytes(bytes) + .map(BeaconBlock::Fulu) + .or_else(|_| BeaconBlockElectra::from_ssz_bytes(bytes).map(BeaconBlock::Electra)) .or_else(|_| BeaconBlockDeneb::from_ssz_bytes(bytes).map(BeaconBlock::Deneb)) .or_else(|_| BeaconBlockCapella::from_ssz_bytes(bytes).map(BeaconBlock::Capella)) .or_else(|_| BeaconBlockBellatrix::from_ssz_bytes(bytes).map(BeaconBlock::Bellatrix)) @@ -226,6 +229,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockRef<'a, E, Payl BeaconBlockRef::Capella { .. } => ForkName::Capella, BeaconBlockRef::Deneb { .. } => ForkName::Deneb, BeaconBlockRef::Electra { .. } => ForkName::Electra, + BeaconBlockRef::Fulu { .. } => ForkName::Fulu, } } @@ -704,6 +708,110 @@ impl> EmptyBlock for BeaconBlockElec } } +impl> BeaconBlockFulu { + /// Return a Fulu block where the block has maximum size. 
+ pub fn full(spec: &ChainSpec) -> Self { + let base_block: BeaconBlockBase<_, Payload> = BeaconBlockBase::full(spec); + let indexed_attestation: IndexedAttestationElectra = IndexedAttestationElectra { + attesting_indices: VariableList::new(vec![0_u64; E::MaxValidatorsPerSlot::to_usize()]) + .unwrap(), + data: AttestationData::default(), + signature: AggregateSignature::empty(), + }; + let attester_slashings = vec![ + AttesterSlashingElectra { + attestation_1: indexed_attestation.clone(), + attestation_2: indexed_attestation, + }; + E::max_attester_slashings_electra() + ] + .into(); + let attestation = AttestationElectra { + aggregation_bits: BitList::with_capacity(E::MaxValidatorsPerSlot::to_usize()).unwrap(), + data: AttestationData::default(), + signature: AggregateSignature::empty(), + committee_bits: BitVector::new(), + }; + let mut attestations_electra = vec![]; + for _ in 0..E::MaxAttestationsElectra::to_usize() { + attestations_electra.push(attestation.clone()); + } + + let bls_to_execution_changes = vec![ + SignedBlsToExecutionChange { + message: BlsToExecutionChange { + validator_index: 0, + from_bls_pubkey: PublicKeyBytes::empty(), + to_execution_address: Address::ZERO, + }, + signature: Signature::empty() + }; + E::max_bls_to_execution_changes() + ] + .into(); + let sync_aggregate = SyncAggregate { + sync_committee_signature: AggregateSignature::empty(), + sync_committee_bits: BitVector::default(), + }; + BeaconBlockFulu { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyFulu { + proposer_slashings: base_block.body.proposer_slashings, + attester_slashings, + attestations: attestations_electra.into(), + deposits: base_block.body.deposits, + voluntary_exits: base_block.body.voluntary_exits, + bls_to_execution_changes, + sync_aggregate, + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + 
deposit_count: 0, + }, + graffiti: Graffiti::default(), + execution_payload: Payload::Fulu::default(), + blob_kzg_commitments: VariableList::empty(), + execution_requests: ExecutionRequests::default(), + }, + } + } +} + +impl> EmptyBlock for BeaconBlockFulu { + /// Returns an empty Fulu block to be used during genesis. + fn empty(spec: &ChainSpec) -> Self { + BeaconBlockFulu { + slot: spec.genesis_slot, + proposer_index: 0, + parent_root: Hash256::zero(), + state_root: Hash256::zero(), + body: BeaconBlockBodyFulu { + randao_reveal: Signature::empty(), + eth1_data: Eth1Data { + deposit_root: Hash256::zero(), + block_hash: Hash256::zero(), + deposit_count: 0, + }, + graffiti: Graffiti::default(), + proposer_slashings: VariableList::empty(), + attester_slashings: VariableList::empty(), + attestations: VariableList::empty(), + deposits: VariableList::empty(), + voluntary_exits: VariableList::empty(), + sync_aggregate: SyncAggregate::empty(), + execution_payload: Payload::Fulu::default(), + bls_to_execution_changes: VariableList::empty(), + blob_kzg_commitments: VariableList::empty(), + execution_requests: ExecutionRequests::default(), + }, + } + } +} + // We can convert pre-Bellatrix blocks without payloads into blocks "with" payloads. impl From>> for BeaconBlockBase> @@ -785,6 +893,7 @@ impl_from!(BeaconBlockBellatrix, >, >, |b impl_from!(BeaconBlockCapella, >, >, |body: BeaconBlockBodyCapella<_, _>| body.into()); impl_from!(BeaconBlockDeneb, >, >, |body: BeaconBlockBodyDeneb<_, _>| body.into()); impl_from!(BeaconBlockElectra, >, >, |body: BeaconBlockBodyElectra<_, _>| body.into()); +impl_from!(BeaconBlockFulu, >, >, |body: BeaconBlockBodyFulu<_, _>| body.into()); // We can clone blocks with payloads to blocks without payloads, without cloning the payload. macro_rules! 
impl_clone_as_blinded { @@ -818,6 +927,7 @@ impl_clone_as_blinded!(BeaconBlockBellatrix, >, >, >); impl_clone_as_blinded!(BeaconBlockDeneb, >, >); impl_clone_as_blinded!(BeaconBlockElectra, >, >); +impl_clone_as_blinded!(BeaconBlockFulu, >, >); // A reference to a full beacon block can be cloned into a blinded beacon block, without cloning the // execution payload. @@ -988,6 +1098,26 @@ mod tests { }); } + #[test] + fn roundtrip_fulu_block() { + let rng = &mut XorShiftRng::from_seed([42; 16]); + let spec = &ForkName::Fulu.make_genesis_spec(MainnetEthSpec::default_spec()); + + let inner_block = BeaconBlockFulu { + slot: Slot::random_for_test(rng), + proposer_index: u64::random_for_test(rng), + parent_root: Hash256::random_for_test(rng), + state_root: Hash256::random_for_test(rng), + body: BeaconBlockBodyFulu::random_for_test(rng), + }; + + let block = BeaconBlock::Fulu(inner_block.clone()); + + test_ssz_tree_hash_pair_with(&block, &inner_block, |bytes| { + BeaconBlock::from_ssz_bytes(bytes, spec) + }); + } + #[test] fn decode_base_and_altair() { type E = MainnetEthSpec; @@ -1007,11 +1137,14 @@ mod tests { let deneb_slot = deneb_epoch.start_slot(E::slots_per_epoch()); let electra_epoch = deneb_epoch + 1; let electra_slot = electra_epoch.start_slot(E::slots_per_epoch()); + let fulu_epoch = electra_epoch + 1; + let fulu_slot = fulu_epoch.start_slot(E::slots_per_epoch()); spec.altair_fork_epoch = Some(altair_epoch); spec.capella_fork_epoch = Some(capella_epoch); spec.deneb_fork_epoch = Some(deneb_epoch); spec.electra_fork_epoch = Some(electra_epoch); + spec.fulu_fork_epoch = Some(fulu_epoch); // BeaconBlockBase { @@ -1122,5 +1255,29 @@ mod tests { BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) .expect_err("bad electra block cannot be decoded"); } + + // BeaconBlockFulu + { + let good_block = BeaconBlock::Fulu(BeaconBlockFulu { + slot: fulu_slot, + ..<_>::random_for_test(rng) + }); + // It's invalid to have a Fulu block with a epoch lower than the fork 
epoch. + let _bad_block = { + let mut bad = good_block.clone(); + *bad.slot_mut() = electra_slot; + bad + }; + + assert_eq!( + BeaconBlock::from_ssz_bytes(&good_block.as_ssz_bytes(), &spec) + .expect("good fulu block can be decoded"), + good_block + ); + // TODO(fulu): Uncomment once Fulu has features since without features + // and with an Electra slot it decodes successfully to Electra. + //BeaconBlock::from_ssz_bytes(&bad_block.as_ssz_bytes(), &spec) + // .expect_err("bad fulu block cannot be decoded"); + } } } diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index f7a701fed6..a198cdf28f 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -30,7 +30,7 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; /// /// This *superstruct* abstracts over the hard-fork. #[superstruct( - variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -58,6 +58,7 @@ pub const BLOB_KZG_COMMITMENTS_INDEX: usize = 11; Capella(metastruct(mappings(beacon_block_body_capella_fields(groups(fields))))), Deneb(metastruct(mappings(beacon_block_body_deneb_fields(groups(fields))))), Electra(metastruct(mappings(beacon_block_body_electra_fields(groups(fields))))), + Fulu(metastruct(mappings(beacon_block_body_fulu_fields(groups(fields))))) ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), partial_getter_error(ty = "Error", expr = "Error::IncorrectStateVariant") @@ -77,7 +78,10 @@ pub struct BeaconBlockBody = FullPay partial_getter(rename = "attester_slashings_base") )] pub attester_slashings: VariableList, E::MaxAttesterSlashings>, - #[superstruct(only(Electra), partial_getter(rename = "attester_slashings_electra"))] + #[superstruct( + only(Electra, Fulu), + partial_getter(rename = "attester_slashings_electra") + )] pub attester_slashings: VariableList, 
E::MaxAttesterSlashingsElectra>, #[superstruct( @@ -85,11 +89,11 @@ pub struct BeaconBlockBody = FullPay partial_getter(rename = "attestations_base") )] pub attestations: VariableList, E::MaxAttestations>, - #[superstruct(only(Electra), partial_getter(rename = "attestations_electra"))] + #[superstruct(only(Electra, Fulu), partial_getter(rename = "attestations_electra"))] pub attestations: VariableList, E::MaxAttestationsElectra>, pub deposits: VariableList, pub voluntary_exits: VariableList, - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] pub sync_aggregate: SyncAggregate, // We flatten the execution payload so that serde can use the name of the inner type, // either `execution_payload` for full payloads, or `execution_payload_header` for blinded @@ -109,12 +113,15 @@ pub struct BeaconBlockBody = FullPay #[superstruct(only(Electra), partial_getter(rename = "execution_payload_electra"))] #[serde(flatten)] pub execution_payload: Payload::Electra, - #[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] + #[serde(flatten)] + pub execution_payload: Payload::Fulu, + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub bls_to_execution_changes: VariableList, - #[superstruct(only(Deneb, Electra))] + #[superstruct(only(Deneb, Electra, Fulu))] pub blob_kzg_commitments: KzgCommitments, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub execution_requests: ExecutionRequests, #[superstruct(only(Base, Altair))] #[metastruct(exclude_from(fields))] @@ -144,6 +151,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Self::Capella(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Deneb(body) => Ok(Payload::Ref::from(&body.execution_payload)), Self::Electra(body) => Ok(Payload::Ref::from(&body.execution_payload)), + Self::Fulu(body) => 
Ok(Payload::Ref::from(&body.execution_payload)), } } @@ -174,6 +182,10 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, beacon_block_body_electra_fields!(body, |_, field| leaves .push(field.tree_hash_root())); } + Self::Fulu(body) => { + beacon_block_body_fulu_fields!(body, |_, field| leaves + .push(field.tree_hash_root())); + } } leaves } @@ -202,7 +214,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Self::Base(_) | Self::Altair(_) | Self::Bellatrix(_) | Self::Capella(_) => { Err(Error::IncorrectStateVariant) } - Self::Deneb(_) | Self::Electra(_) => { + Self::Deneb(_) | Self::Electra(_) | Self::Fulu(_) => { // We compute the branches by generating 2 merkle trees: // 1. Merkle tree for the `blob_kzg_commitments` List object // 2. Merkle tree for the `BeaconBlockBody` container @@ -294,6 +306,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Self::Capella(body) => body.attestations.len(), Self::Deneb(body) => body.attestations.len(), Self::Electra(body) => body.attestations.len(), + Self::Fulu(body) => body.attestations.len(), } } @@ -305,6 +318,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Self::Capella(body) => body.attester_slashings.len(), Self::Deneb(body) => body.attester_slashings.len(), Self::Electra(body) => body.attester_slashings.len(), + Self::Fulu(body) => body.attester_slashings.len(), } } @@ -316,6 +330,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, Self::Capella(body) => Box::new(body.attestations.iter().map(AttestationRef::Base)), Self::Deneb(body) => Box::new(body.attestations.iter().map(AttestationRef::Base)), Self::Electra(body) => Box::new(body.attestations.iter().map(AttestationRef::Electra)), + Self::Fulu(body) => Box::new(body.attestations.iter().map(AttestationRef::Electra)), } } @@ -351,6 +366,11 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, 
.iter() .map(AttesterSlashingRef::Electra), ), + Self::Fulu(body) => Box::new( + body.attester_slashings + .iter() + .map(AttesterSlashingRef::Electra), + ), } } } @@ -376,6 +396,9 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRefMut<'a, Self::Electra(body) => { Box::new(body.attestations.iter_mut().map(AttestationRefMut::Electra)) } + Self::Fulu(body) => { + Box::new(body.attestations.iter_mut().map(AttestationRefMut::Electra)) + } } } } @@ -390,6 +413,7 @@ impl> BeaconBlockBodyRef<'_, E, Payl BeaconBlockBodyRef::Capella { .. } => ForkName::Capella, BeaconBlockBodyRef::Deneb { .. } => ForkName::Deneb, BeaconBlockBodyRef::Electra { .. } => ForkName::Electra, + BeaconBlockBodyRef::Fulu { .. } => ForkName::Fulu, } } } @@ -704,6 +728,52 @@ impl From>> } } +impl From>> + for ( + BeaconBlockBodyFulu>, + Option>, + ) +{ + fn from(body: BeaconBlockBodyFulu>) -> Self { + let BeaconBlockBodyFulu { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadFulu { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + execution_requests, + } = body; + + ( + BeaconBlockBodyFulu { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadFulu { + execution_payload_header: From::from(&execution_payload), + }, + bls_to_execution_changes, + blob_kzg_commitments: blob_kzg_commitments.clone(), + execution_requests, + }, + Some(execution_payload), + ) + } +} + // We can clone a full block into a blinded block, without cloning the payload. 
impl BeaconBlockBodyBase> { pub fn clone_as_blinded(&self) -> BeaconBlockBodyBase> { @@ -859,6 +929,44 @@ impl BeaconBlockBodyElectra> { } } +impl BeaconBlockBodyFulu> { + pub fn clone_as_blinded(&self) -> BeaconBlockBodyFulu> { + let BeaconBlockBodyFulu { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadFulu { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + execution_requests, + } = self; + + BeaconBlockBodyFulu { + randao_reveal: randao_reveal.clone(), + eth1_data: eth1_data.clone(), + graffiti: *graffiti, + proposer_slashings: proposer_slashings.clone(), + attester_slashings: attester_slashings.clone(), + attestations: attestations.clone(), + deposits: deposits.clone(), + voluntary_exits: voluntary_exits.clone(), + sync_aggregate: sync_aggregate.clone(), + execution_payload: BlindedPayloadFulu { + execution_payload_header: execution_payload.into(), + }, + bls_to_execution_changes: bls_to_execution_changes.clone(), + blob_kzg_commitments: blob_kzg_commitments.clone(), + execution_requests: execution_requests.clone(), + } + } +} + impl From>> for ( BeaconBlockBody>, diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 05f28744fa..de6077bf94 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -223,7 +223,7 @@ impl From for Hash256 { /// /// https://github.com/sigp/milhouse/issues/43 #[superstruct( - variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Derivative, @@ -326,6 +326,20 @@ impl From for Hash256 { groups(tree_lists) )), num_fields(all()), + )), + Fulu(metastruct( + mappings( + map_beacon_state_fulu_fields(), + map_beacon_state_fulu_tree_list_fields(mutable, fallible, groups(tree_lists)), + 
map_beacon_state_fulu_tree_list_fields_immutable(groups(tree_lists)), + ), + bimappings(bimap_beacon_state_fulu_tree_list_fields( + other_type = "BeaconStateFulu", + self_mutable, + fallible, + groups(tree_lists) + )), + num_fields(all()), )) ), cast_error(ty = "Error", expr = "Error::IncorrectStateVariant"), @@ -408,11 +422,11 @@ where // Participation (Altair and later) #[compare_fields(as_iter)] - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] #[test_random(default)] #[compare_fields(as_iter)] pub previous_epoch_participation: List, - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] #[test_random(default)] pub current_epoch_participation: List, @@ -432,15 +446,15 @@ where // Inactivity #[serde(with = "ssz_types::serde_utils::quoted_u64_var_list")] - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] #[test_random(default)] pub inactivity_scores: List, // Light-client sync committees - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] #[metastruct(exclude_from(tree_lists))] pub current_sync_committee: Arc>, - #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra))] + #[superstruct(only(Altair, Bellatrix, Capella, Deneb, Electra, Fulu))] #[metastruct(exclude_from(tree_lists))] pub next_sync_committee: Arc>, @@ -469,56 +483,62 @@ where )] #[metastruct(exclude_from(tree_lists))] pub latest_execution_payload_header: ExecutionPayloadHeaderElectra, + #[superstruct( + only(Fulu), + partial_getter(rename = "latest_execution_payload_header_fulu") + )] + #[metastruct(exclude_from(tree_lists))] + pub latest_execution_payload_header: ExecutionPayloadHeaderFulu, // Capella - #[superstruct(only(Capella, Deneb, Electra), 
partial_getter(copy))] + #[superstruct(only(Capella, Deneb, Electra, Fulu), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] #[metastruct(exclude_from(tree_lists))] pub next_withdrawal_index: u64, - #[superstruct(only(Capella, Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Capella, Deneb, Electra, Fulu), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] #[metastruct(exclude_from(tree_lists))] pub next_withdrawal_validator_index: u64, // Deep history valid from Capella onwards. - #[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] #[test_random(default)] pub historical_summaries: List, // Electra - #[superstruct(only(Electra), partial_getter(copy))] + #[superstruct(only(Electra, Fulu), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub deposit_requests_start_index: u64, - #[superstruct(only(Electra), partial_getter(copy))] + #[superstruct(only(Electra, Fulu), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub deposit_balance_to_consume: u64, - #[superstruct(only(Electra), partial_getter(copy))] + #[superstruct(only(Electra, Fulu), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub exit_balance_to_consume: u64, - #[superstruct(only(Electra), partial_getter(copy))] + #[superstruct(only(Electra, Fulu), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] pub earliest_exit_epoch: Epoch, - #[superstruct(only(Electra), partial_getter(copy))] + #[superstruct(only(Electra, Fulu), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] #[serde(with = "serde_utils::quoted_u64")] pub consolidation_balance_to_consume: u64, - #[superstruct(only(Electra), partial_getter(copy))] + #[superstruct(only(Electra, Fulu), partial_getter(copy))] #[metastruct(exclude_from(tree_lists))] pub 
earliest_consolidation_epoch: Epoch, #[compare_fields(as_iter)] #[test_random(default)] - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub pending_deposits: List, #[compare_fields(as_iter)] #[test_random(default)] - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub pending_partial_withdrawals: List, #[compare_fields(as_iter)] #[test_random(default)] - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] pub pending_consolidations: List, // Caching (not in the spec) @@ -659,6 +679,7 @@ impl BeaconState { BeaconState::Capella { .. } => ForkName::Capella, BeaconState::Deneb { .. } => ForkName::Deneb, BeaconState::Electra { .. } => ForkName::Electra, + BeaconState::Fulu { .. } => ForkName::Fulu, } } @@ -948,6 +969,9 @@ impl BeaconState { BeaconState::Electra(state) => Ok(ExecutionPayloadHeaderRef::Electra( &state.latest_execution_payload_header, )), + BeaconState::Fulu(state) => Ok(ExecutionPayloadHeaderRef::Fulu( + &state.latest_execution_payload_header, + )), } } @@ -968,6 +992,9 @@ impl BeaconState { BeaconState::Electra(state) => Ok(ExecutionPayloadHeaderRefMut::Electra( &mut state.latest_execution_payload_header, )), + BeaconState::Fulu(state) => Ok(ExecutionPayloadHeaderRefMut::Fulu( + &mut state.latest_execution_payload_header, + )), } } @@ -1481,6 +1508,16 @@ impl BeaconState { &mut state.exit_cache, &mut state.epoch_cache, )), + BeaconState::Fulu(state) => Ok(( + &mut state.validators, + &mut state.balances, + &state.previous_epoch_participation, + &state.current_epoch_participation, + &mut state.inactivity_scores, + &mut state.progressive_balances_cache, + &mut state.exit_cache, + &mut state.epoch_cache, + )), } } @@ -1662,10 +1699,12 @@ impl BeaconState { | BeaconState::Altair(_) | BeaconState::Bellatrix(_) | BeaconState::Capella(_) => self.get_validator_churn_limit(spec)?, - BeaconState::Deneb(_) | BeaconState::Electra(_) => std::cmp::min( - spec.max_per_epoch_activation_churn_limit, - 
self.get_validator_churn_limit(spec)?, - ), + BeaconState::Deneb(_) | BeaconState::Electra(_) | BeaconState::Fulu(_) => { + std::cmp::min( + spec.max_per_epoch_activation_churn_limit, + self.get_validator_churn_limit(spec)?, + ) + } }) } @@ -1783,6 +1822,7 @@ impl BeaconState { BeaconState::Capella(state) => Ok(&mut state.current_epoch_participation), BeaconState::Deneb(state) => Ok(&mut state.current_epoch_participation), BeaconState::Electra(state) => Ok(&mut state.current_epoch_participation), + BeaconState::Fulu(state) => Ok(&mut state.current_epoch_participation), } } else if epoch == previous_epoch { match self { @@ -1792,6 +1832,7 @@ impl BeaconState { BeaconState::Capella(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Deneb(state) => Ok(&mut state.previous_epoch_participation), BeaconState::Electra(state) => Ok(&mut state.previous_epoch_participation), + BeaconState::Fulu(state) => Ok(&mut state.previous_epoch_participation), } } else { Err(BeaconStateError::EpochOutOfBounds) @@ -2045,6 +2086,11 @@ impl BeaconState { } ); } + Self::Fulu(self_inner) => { + map_beacon_state_fulu_tree_list_fields_immutable!(self_inner, |_, self_field| { + any_pending_mutations |= self_field.has_pending_updates(); + }); + } }; any_pending_mutations } @@ -2238,12 +2284,29 @@ impl BeaconState { exit_balance_to_consume .safe_add_assign(additional_epochs.safe_mul(per_epoch_churn)?)?; } - let state = self.as_electra_mut()?; - // Consume the balance and update state variables - state.exit_balance_to_consume = exit_balance_to_consume.safe_sub(exit_balance)?; - state.earliest_exit_epoch = earliest_exit_epoch; + match self { + BeaconState::Base(_) + | BeaconState::Altair(_) + | BeaconState::Bellatrix(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), + BeaconState::Electra(_) => { + let state = self.as_electra_mut()?; + + // Consume the balance and update state variables + state.exit_balance_to_consume = 
exit_balance_to_consume.safe_sub(exit_balance)?; + state.earliest_exit_epoch = earliest_exit_epoch; + Ok(state.earliest_exit_epoch) + } + BeaconState::Fulu(_) => { + let state = self.as_fulu_mut()?; - Ok(state.earliest_exit_epoch) + // Consume the balance and update state variables + state.exit_balance_to_consume = exit_balance_to_consume.safe_sub(exit_balance)?; + state.earliest_exit_epoch = earliest_exit_epoch; + Ok(state.earliest_exit_epoch) + } + } } pub fn compute_consolidation_epoch_and_update_churn( @@ -2277,13 +2340,31 @@ impl BeaconState { consolidation_balance_to_consume .safe_add_assign(additional_epochs.safe_mul(per_epoch_consolidation_churn)?)?; } - // Consume the balance and update state variables - let state = self.as_electra_mut()?; - state.consolidation_balance_to_consume = - consolidation_balance_to_consume.safe_sub(consolidation_balance)?; - state.earliest_consolidation_epoch = earliest_consolidation_epoch; - - Ok(state.earliest_consolidation_epoch) + match self { + BeaconState::Base(_) + | BeaconState::Altair(_) + | BeaconState::Bellatrix(_) + | BeaconState::Capella(_) + | BeaconState::Deneb(_) => Err(Error::IncorrectStateVariant), + BeaconState::Electra(_) => { + let state = self.as_electra_mut()?; + + // Consume the balance and update state variables. + state.consolidation_balance_to_consume = + consolidation_balance_to_consume.safe_sub(consolidation_balance)?; + state.earliest_consolidation_epoch = earliest_consolidation_epoch; + Ok(state.earliest_consolidation_epoch) + } + BeaconState::Fulu(_) => { + let state = self.as_fulu_mut()?; + + // Consume the balance and update state variables. 
+ state.consolidation_balance_to_consume = + consolidation_balance_to_consume.safe_sub(consolidation_balance)?; + state.earliest_consolidation_epoch = earliest_consolidation_epoch; + Ok(state.earliest_consolidation_epoch) + } + } } #[allow(clippy::arithmetic_side_effects)] @@ -2339,6 +2420,14 @@ impl BeaconState { ); } (Self::Electra(_), _) => (), + (Self::Fulu(self_inner), Self::Fulu(base_inner)) => { + bimap_beacon_state_fulu_tree_list_fields!( + self_inner, + base_inner, + |_, self_field, base_field| { self_field.rebase_on(base_field) } + ); + } + (Self::Fulu(_), _) => (), } // Use sync committees from `base` if they are equal. @@ -2411,6 +2500,7 @@ impl BeaconState { ForkName::Capella => BeaconStateCapella::::NUM_FIELDS.next_power_of_two(), ForkName::Deneb => BeaconStateDeneb::::NUM_FIELDS.next_power_of_two(), ForkName::Electra => BeaconStateElectra::::NUM_FIELDS.next_power_of_two(), + ForkName::Fulu => BeaconStateFulu::::NUM_FIELDS.next_power_of_two(), } } @@ -2459,6 +2549,9 @@ impl BeaconState { Self::Electra(inner) => { map_beacon_state_electra_tree_list_fields!(inner, |_, x| { x.apply_updates() }) } + Self::Fulu(inner) => { + map_beacon_state_fulu_tree_list_fields!(inner, |_, x| { x.apply_updates() }) + } } Ok(()) } @@ -2554,6 +2647,11 @@ impl BeaconState { leaves.push(field.tree_hash_root()); }); } + BeaconState::Fulu(state) => { + map_beacon_state_fulu_fields!(state, |_, field| { + leaves.push(field.tree_hash_root()); + }); + } }; leaves @@ -2611,6 +2709,7 @@ impl CompareFields for BeaconState { (BeaconState::Capella(x), BeaconState::Capella(y)) => x.compare_fields(y), (BeaconState::Deneb(x), BeaconState::Deneb(y)) => x.compare_fields(y), (BeaconState::Electra(x), BeaconState::Electra(y)) => x.compare_fields(y), + (BeaconState::Fulu(x), BeaconState::Fulu(y)) => x.compare_fields(y), _ => panic!("compare_fields: mismatched state variants",), } } diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs 
b/consensus/types/src/beacon_state/progressive_balances_cache.rs index bc258ef68d..8e8a1a6aa9 100644 --- a/consensus/types/src/beacon_state/progressive_balances_cache.rs +++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs @@ -285,12 +285,5 @@ impl ProgressiveBalancesCache { /// `ProgressiveBalancesCache` is only enabled from `Altair` as it uses Altair-specific logic. pub fn is_progressive_balances_enabled(state: &BeaconState) -> bool { - match state { - BeaconState::Base(_) => false, - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => true, - } + state.fork_name_unchecked().altair_enabled() } diff --git a/consensus/types/src/builder_bid.rs b/consensus/types/src/builder_bid.rs index 9885f78474..2ce46ca704 100644 --- a/consensus/types/src/builder_bid.rs +++ b/consensus/types/src/builder_bid.rs @@ -1,8 +1,9 @@ use crate::beacon_block_body::KzgCommitments; use crate::{ ChainSpec, EthSpec, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef, - ExecutionPayloadHeaderRefMut, ForkName, ForkVersionDeserialize, SignedRoot, Uint256, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, ForkName, ForkVersionDeserialize, + SignedRoot, Uint256, }; use bls::PublicKeyBytes; use bls::Signature; @@ -11,7 +12,7 @@ use superstruct::superstruct; use tree_hash_derive::TreeHash; #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive(PartialEq, Debug, Serialize, Deserialize, TreeHash, Clone), serde(bound = "E: EthSpec", deny_unknown_fields) @@ -31,7 +32,9 @@ pub struct BuilderBid { pub header: ExecutionPayloadHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "header_electra"))] pub header: 
ExecutionPayloadHeaderElectra, - #[superstruct(only(Deneb, Electra))] + #[superstruct(only(Fulu), partial_getter(rename = "header_fulu"))] + pub header: ExecutionPayloadHeaderFulu, + #[superstruct(only(Deneb, Electra, Fulu))] pub blob_kzg_commitments: KzgCommitments, #[serde(with = "serde_utils::quoted_u256")] pub value: Uint256, @@ -85,6 +88,7 @@ impl ForkVersionDeserialize for BuilderBid { ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Electra => Self::Electra(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Fulu => Self::Fulu(serde_json::from_value(value).map_err(convert_err)?), ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "BuilderBid failed to deserialize: unsupported fork '{}'", diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 9d3308cf23..f0bfeba680 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -192,6 +192,14 @@ pub struct ChainSpec { pub min_per_epoch_churn_limit_electra: u64, pub max_per_epoch_activation_exit_churn_limit: u64, + /* + * Fulu hard fork params + */ + pub fulu_fork_version: [u8; 4], + /// The Fulu fork epoch is optional, with `None` representing "Fulu never happens". + pub fulu_fork_epoch: Option, + pub fulu_placeholder: u64, + /* * DAS params */ @@ -313,17 +321,20 @@ impl ChainSpec { /// Returns the name of the fork which is active at `epoch`. 
pub fn fork_name_at_epoch(&self, epoch: Epoch) -> ForkName { - match self.electra_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Electra, - _ => match self.deneb_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Deneb, - _ => match self.capella_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, - _ => match self.bellatrix_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Bellatrix, - _ => match self.altair_fork_epoch { - Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, - _ => ForkName::Base, + match self.fulu_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Fulu, + _ => match self.electra_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Electra, + _ => match self.deneb_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Deneb, + _ => match self.capella_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Capella, + _ => match self.bellatrix_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Bellatrix, + _ => match self.altair_fork_epoch { + Some(fork_epoch) if epoch >= fork_epoch => ForkName::Altair, + _ => ForkName::Base, + }, }, }, }, @@ -340,6 +351,7 @@ impl ChainSpec { ForkName::Capella => self.capella_fork_version, ForkName::Deneb => self.deneb_fork_version, ForkName::Electra => self.electra_fork_version, + ForkName::Fulu => self.fulu_fork_version, } } @@ -352,6 +364,7 @@ impl ChainSpec { ForkName::Capella => self.capella_fork_epoch, ForkName::Deneb => self.deneb_fork_epoch, ForkName::Electra => self.electra_fork_epoch, + ForkName::Fulu => self.fulu_fork_epoch, } } @@ -802,6 +815,13 @@ impl ChainSpec { }) .expect("calculation does not overflow"), + /* + * Fulu hard fork params + */ + fulu_fork_version: [0x06, 0x00, 0x00, 0x00], + fulu_fork_epoch: None, + fulu_placeholder: 0, + /* * DAS params */ @@ -917,6 +937,9 @@ impl ChainSpec { u64::checked_pow(2, 
7)?.checked_mul(u64::checked_pow(10, 9)?) }) .expect("calculation does not overflow"), + // Fulu + fulu_fork_version: [0x06, 0x00, 0x00, 0x01], + fulu_fork_epoch: None, // PeerDAS eip7594_fork_epoch: None, // Other @@ -1121,6 +1144,13 @@ impl ChainSpec { }) .expect("calculation does not overflow"), + /* + * Fulu hard fork params + */ + fulu_fork_version: [0x06, 0x00, 0x00, 0x64], + fulu_fork_epoch: None, + fulu_placeholder: 0, + /* * DAS params */ @@ -1255,6 +1285,14 @@ pub struct Config { #[serde(deserialize_with = "deserialize_fork_epoch")] pub electra_fork_epoch: Option>, + #[serde(default = "default_fulu_fork_version")] + #[serde(with = "serde_utils::bytes_4_hex")] + fulu_fork_version: [u8; 4], + #[serde(default)] + #[serde(serialize_with = "serialize_fork_epoch")] + #[serde(deserialize_with = "deserialize_fork_epoch")] + pub fulu_fork_epoch: Option>, + #[serde(default)] #[serde(serialize_with = "serialize_fork_epoch")] #[serde(deserialize_with = "deserialize_fork_epoch")] @@ -1392,6 +1430,11 @@ fn default_electra_fork_version() -> [u8; 4] { [0xff, 0xff, 0xff, 0xff] } +fn default_fulu_fork_version() -> [u8; 4] { + // This value shouldn't be used. + [0xff, 0xff, 0xff, 0xff] +} + /// Placeholder value: 2^256-2^10 (115792089237316195423570985008687907853269984665640564039457584007913129638912). 
/// /// Taken from https://github.com/ethereum/consensus-specs/blob/d5e4828aecafaf1c57ef67a5f23c4ae7b08c5137/configs/mainnet.yaml#L15-L16 @@ -1655,6 +1698,11 @@ impl Config { .electra_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), + fulu_fork_version: spec.fulu_fork_version, + fulu_fork_epoch: spec + .fulu_fork_epoch + .map(|epoch| MaybeQuoted { value: epoch }), + eip7594_fork_epoch: spec .eip7594_fork_epoch .map(|epoch| MaybeQuoted { value: epoch }), @@ -1738,6 +1786,8 @@ impl Config { deneb_fork_version, electra_fork_epoch, electra_fork_version, + fulu_fork_epoch, + fulu_fork_version, eip7594_fork_epoch, seconds_per_slot, seconds_per_eth1_block, @@ -1801,6 +1851,8 @@ impl Config { deneb_fork_version, electra_fork_epoch: electra_fork_epoch.map(|q| q.value), electra_fork_version, + fulu_fork_epoch: fulu_fork_epoch.map(|q| q.value), + fulu_fork_version, eip7594_fork_epoch: eip7594_fork_epoch.map(|q| q.value), seconds_per_slot, seconds_per_eth1_block, diff --git a/consensus/types/src/config_and_preset.rs b/consensus/types/src/config_and_preset.rs index c80d678b2a..235bf20238 100644 --- a/consensus/types/src/config_and_preset.rs +++ b/consensus/types/src/config_and_preset.rs @@ -1,6 +1,6 @@ use crate::{ consts::altair, consts::deneb, AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, - ChainSpec, Config, DenebPreset, ElectraPreset, EthSpec, ForkName, + ChainSpec, Config, DenebPreset, ElectraPreset, EthSpec, ForkName, FuluPreset, }; use maplit::hashmap; use serde::{Deserialize, Serialize}; @@ -12,7 +12,7 @@ use superstruct::superstruct; /// /// Mostly useful for the API. 
#[superstruct( - variants(Capella, Deneb, Electra), + variants(Deneb, Electra, Fulu), variant_attributes(derive(Serialize, Deserialize, Debug, PartialEq, Clone)) )] #[derive(Serialize, Deserialize, Debug, PartialEq, Clone)] @@ -29,12 +29,14 @@ pub struct ConfigAndPreset { pub bellatrix_preset: BellatrixPreset, #[serde(flatten)] pub capella_preset: CapellaPreset, - #[superstruct(only(Deneb, Electra))] #[serde(flatten)] pub deneb_preset: DenebPreset, - #[superstruct(only(Electra))] + #[superstruct(only(Electra, Fulu))] #[serde(flatten)] pub electra_preset: ElectraPreset, + #[superstruct(only(Fulu))] + #[serde(flatten)] + pub fulu_preset: FuluPreset, /// The `extra_fields` map allows us to gracefully decode fields intended for future hard forks. #[serde(flatten)] pub extra_fields: HashMap, @@ -48,16 +50,17 @@ impl ConfigAndPreset { let altair_preset = AltairPreset::from_chain_spec::(spec); let bellatrix_preset = BellatrixPreset::from_chain_spec::(spec); let capella_preset = CapellaPreset::from_chain_spec::(spec); + let deneb_preset = DenebPreset::from_chain_spec::(spec); let extra_fields = get_extra_fields(spec); - if spec.electra_fork_epoch.is_some() + if spec.fulu_fork_epoch.is_some() || fork_name.is_none() - || fork_name == Some(ForkName::Electra) + || fork_name == Some(ForkName::Fulu) { - let deneb_preset = DenebPreset::from_chain_spec::(spec); let electra_preset = ElectraPreset::from_chain_spec::(spec); + let fulu_preset = FuluPreset::from_chain_spec::(spec); - ConfigAndPreset::Electra(ConfigAndPresetElectra { + ConfigAndPreset::Fulu(ConfigAndPresetFulu { config, base_preset, altair_preset, @@ -65,29 +68,33 @@ impl ConfigAndPreset { capella_preset, deneb_preset, electra_preset, + fulu_preset, extra_fields, }) - } else if spec.deneb_fork_epoch.is_some() + } else if spec.electra_fork_epoch.is_some() || fork_name.is_none() - || fork_name == Some(ForkName::Deneb) + || fork_name == Some(ForkName::Electra) { - let deneb_preset = DenebPreset::from_chain_spec::(spec); - 
ConfigAndPreset::Deneb(ConfigAndPresetDeneb { + let electra_preset = ElectraPreset::from_chain_spec::(spec); + + ConfigAndPreset::Electra(ConfigAndPresetElectra { config, base_preset, altair_preset, bellatrix_preset, capella_preset, deneb_preset, + electra_preset, extra_fields, }) } else { - ConfigAndPreset::Capella(ConfigAndPresetCapella { + ConfigAndPreset::Deneb(ConfigAndPresetDeneb { config, base_preset, altair_preset, bellatrix_preset, capella_preset, + deneb_preset, extra_fields, }) } @@ -164,8 +171,8 @@ mod test { .write(false) .open(tmp_file.as_ref()) .expect("error while opening the file"); - let from: ConfigAndPresetElectra = + let from: ConfigAndPresetFulu = serde_yaml::from_reader(reader).expect("error while deserializing"); - assert_eq!(ConfigAndPreset::Electra(from), yamlconfig); + assert_eq!(ConfigAndPreset::Fulu(from), yamlconfig); } } diff --git a/consensus/types/src/execution_payload.rs b/consensus/types/src/execution_payload.rs index 9f16b676a6..c619d61487 100644 --- a/consensus/types/src/execution_payload.rs +++ b/consensus/types/src/execution_payload.rs @@ -15,7 +15,7 @@ pub type Transactions = VariableList< pub type Withdrawals = VariableList::MaxWithdrawalsPerPayload>; #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Default, @@ -82,12 +82,12 @@ pub struct ExecutionPayload { pub block_hash: ExecutionBlockHash, #[serde(with = "ssz_types::serde_utils::list_of_hex_var_list")] pub transactions: Transactions, - #[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub withdrawals: Withdrawals, - #[superstruct(only(Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Deneb, Electra, Fulu), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub blob_gas_used: u64, - #[superstruct(only(Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Deneb, Electra, Fulu), 
partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, } @@ -114,6 +114,7 @@ impl ExecutionPayload { ForkName::Capella => ExecutionPayloadCapella::from_ssz_bytes(bytes).map(Self::Capella), ForkName::Deneb => ExecutionPayloadDeneb::from_ssz_bytes(bytes).map(Self::Deneb), ForkName::Electra => ExecutionPayloadElectra::from_ssz_bytes(bytes).map(Self::Electra), + ForkName::Fulu => ExecutionPayloadFulu::from_ssz_bytes(bytes).map(Self::Fulu), } } @@ -166,6 +167,19 @@ impl ExecutionPayload { // Max size of variable length `withdrawals` field + (E::max_withdrawals_per_payload() * ::ssz_fixed_len()) } + + #[allow(clippy::arithmetic_side_effects)] + /// Returns the maximum size of an execution payload. + pub fn max_execution_payload_fulu_size() -> usize { + // Fixed part + ExecutionPayloadFulu::::default().as_ssz_bytes().len() + // Max size of variable length `extra_data` field + + (E::max_extra_data_bytes() * ::ssz_fixed_len()) + // Max size of variable length `transactions` field + + (E::max_transactions_per_payload() * (ssz::BYTES_PER_LENGTH_OFFSET + E::max_bytes_per_transaction())) + // Max size of variable length `withdrawals` field + + (E::max_withdrawals_per_payload() * ::ssz_fixed_len()) + } } impl ForkVersionDeserialize for ExecutionPayload { @@ -184,6 +198,7 @@ impl ForkVersionDeserialize for ExecutionPayload { ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Electra => Self::Electra(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Fulu => Self::Fulu(serde_json::from_value(value).map_err(convert_err)?), ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "ExecutionPayload failed to deserialize: unsupported fork '{}'", @@ -201,6 +216,7 @@ impl ExecutionPayload { ExecutionPayload::Capella(_) => ForkName::Capella, ExecutionPayload::Deneb(_) => 
ForkName::Deneb, ExecutionPayload::Electra(_) => ForkName::Electra, + ExecutionPayload::Fulu(_) => ForkName::Fulu, } } } diff --git a/consensus/types/src/execution_payload_header.rs b/consensus/types/src/execution_payload_header.rs index 4bfbfee9bf..3012041b8b 100644 --- a/consensus/types/src/execution_payload_header.rs +++ b/consensus/types/src/execution_payload_header.rs @@ -8,7 +8,7 @@ use tree_hash::TreeHash; use tree_hash_derive::TreeHash; #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Default, @@ -78,12 +78,12 @@ pub struct ExecutionPayloadHeader { pub block_hash: ExecutionBlockHash, #[superstruct(getter(copy))] pub transactions_root: Hash256, - #[superstruct(only(Capella, Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Capella, Deneb, Electra, Fulu), partial_getter(copy))] pub withdrawals_root: Hash256, - #[superstruct(only(Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Deneb, Electra, Fulu), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub blob_gas_used: u64, - #[superstruct(only(Deneb, Electra), partial_getter(copy))] + #[superstruct(only(Deneb, Electra, Fulu), partial_getter(copy))] #[serde(with = "serde_utils::quoted_u64")] pub excess_blob_gas: u64, } @@ -108,18 +108,18 @@ impl ExecutionPayloadHeader { ForkName::Electra => { ExecutionPayloadHeaderElectra::from_ssz_bytes(bytes).map(Self::Electra) } + ForkName::Fulu => ExecutionPayloadHeaderFulu::from_ssz_bytes(bytes).map(Self::Fulu), } } #[allow(clippy::arithmetic_side_effects)] pub fn ssz_max_var_len_for_fork(fork_name: ForkName) -> usize { - // Matching here in case variable fields are added in future forks. 
- match fork_name { - ForkName::Base | ForkName::Altair => 0, - ForkName::Bellatrix | ForkName::Capella | ForkName::Deneb | ForkName::Electra => { - // Max size of variable length `extra_data` field - E::max_extra_data_bytes() * ::ssz_fixed_len() - } + // TODO(newfork): Add a new case here if there are new variable fields + if fork_name.bellatrix_enabled() { + // Max size of variable length `extra_data` field + E::max_extra_data_bytes() * ::ssz_fixed_len() + } else { + 0 } } @@ -129,6 +129,7 @@ impl ExecutionPayloadHeader { ExecutionPayloadHeader::Capella(_) => ForkName::Capella, ExecutionPayloadHeader::Deneb(_) => ForkName::Deneb, ExecutionPayloadHeader::Electra(_) => ForkName::Electra, + ExecutionPayloadHeader::Fulu(_) => ForkName::Fulu, } } } @@ -212,6 +213,30 @@ impl ExecutionPayloadHeaderDeneb { } } +impl ExecutionPayloadHeaderElectra { + pub fn upgrade_to_fulu(&self) -> ExecutionPayloadHeaderFulu { + ExecutionPayloadHeaderFulu { + parent_hash: self.parent_hash, + fee_recipient: self.fee_recipient, + state_root: self.state_root, + receipts_root: self.receipts_root, + logs_bloom: self.logs_bloom.clone(), + prev_randao: self.prev_randao, + block_number: self.block_number, + gas_limit: self.gas_limit, + gas_used: self.gas_used, + timestamp: self.timestamp, + extra_data: self.extra_data.clone(), + base_fee_per_gas: self.base_fee_per_gas, + block_hash: self.block_hash, + transactions_root: self.transactions_root, + withdrawals_root: self.withdrawals_root, + blob_gas_used: self.blob_gas_used, + excess_blob_gas: self.excess_blob_gas, + } + } +} + impl<'a, E: EthSpec> From<&'a ExecutionPayloadBellatrix> for ExecutionPayloadHeaderBellatrix { fn from(payload: &'a ExecutionPayloadBellatrix) -> Self { Self { @@ -303,6 +328,30 @@ impl<'a, E: EthSpec> From<&'a ExecutionPayloadElectra> for ExecutionPayloadHe } } +impl<'a, E: EthSpec> From<&'a ExecutionPayloadFulu> for ExecutionPayloadHeaderFulu { + fn from(payload: &'a ExecutionPayloadFulu) -> Self { + Self { + parent_hash: 
payload.parent_hash, + fee_recipient: payload.fee_recipient, + state_root: payload.state_root, + receipts_root: payload.receipts_root, + logs_bloom: payload.logs_bloom.clone(), + prev_randao: payload.prev_randao, + block_number: payload.block_number, + gas_limit: payload.gas_limit, + gas_used: payload.gas_used, + timestamp: payload.timestamp, + extra_data: payload.extra_data.clone(), + base_fee_per_gas: payload.base_fee_per_gas, + block_hash: payload.block_hash, + transactions_root: payload.transactions.tree_hash_root(), + withdrawals_root: payload.withdrawals.tree_hash_root(), + blob_gas_used: payload.blob_gas_used, + excess_blob_gas: payload.excess_blob_gas, + } + } +} + // These impls are required to work around an inelegance in `to_execution_payload_header`. // They only clone headers so they should be relatively cheap. impl<'a, E: EthSpec> From<&'a Self> for ExecutionPayloadHeaderBellatrix { @@ -329,6 +378,12 @@ impl<'a, E: EthSpec> From<&'a Self> for ExecutionPayloadHeaderElectra { } } +impl<'a, E: EthSpec> From<&'a Self> for ExecutionPayloadHeaderFulu { + fn from(payload: &'a Self) -> Self { + payload.clone() + } +} + impl<'a, E: EthSpec> From> for ExecutionPayloadHeader { fn from(payload: ExecutionPayloadRef<'a, E>) -> Self { map_execution_payload_ref_into_execution_payload_header!( @@ -387,6 +442,9 @@ impl ExecutionPayloadHeaderRefMut<'_, E> { ExecutionPayloadHeaderRefMut::Electra(mut_ref) => { *mut_ref = header.try_into()?; } + ExecutionPayloadHeaderRefMut::Fulu(mut_ref) => { + *mut_ref = header.try_into()?; + } } Ok(()) } @@ -404,6 +462,16 @@ impl TryFrom> for ExecutionPayloadHeaderEl } } +impl TryFrom> for ExecutionPayloadHeaderFulu { + type Error = BeaconStateError; + fn try_from(header: ExecutionPayloadHeader) -> Result { + match header { + ExecutionPayloadHeader::Fulu(execution_payload_header) => Ok(execution_payload_header), + _ => Err(BeaconStateError::IncorrectStateVariant), + } + } +} + impl ForkVersionDeserialize for ExecutionPayloadHeader { fn 
deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, @@ -423,6 +491,7 @@ impl ForkVersionDeserialize for ExecutionPayloadHeader { ForkName::Capella => Self::Capella(serde_json::from_value(value).map_err(convert_err)?), ForkName::Deneb => Self::Deneb(serde_json::from_value(value).map_err(convert_err)?), ForkName::Electra => Self::Electra(serde_json::from_value(value).map_err(convert_err)?), + ForkName::Fulu => Self::Fulu(serde_json::from_value(value).map_err(convert_err)?), ForkName::Base | ForkName::Altair => { return Err(serde::de::Error::custom(format!( "ExecutionPayloadHeader failed to deserialize: unsupported fork '{}'", diff --git a/consensus/types/src/fork_context.rs b/consensus/types/src/fork_context.rs index 0f7f0eb769..33f1c51d44 100644 --- a/consensus/types/src/fork_context.rs +++ b/consensus/types/src/fork_context.rs @@ -69,6 +69,13 @@ impl ForkContext { )); } + if spec.fulu_fork_epoch.is_some() { + fork_to_digest.push(( + ForkName::Fulu, + ChainSpec::compute_fork_digest(spec.fulu_fork_version, genesis_validators_root), + )); + } + let fork_to_digest: HashMap = fork_to_digest.into_iter().collect(); let digest_to_fork = fork_to_digest diff --git a/consensus/types/src/fork_name.rs b/consensus/types/src/fork_name.rs index 51a5b3813b..b61e0a4d4a 100644 --- a/consensus/types/src/fork_name.rs +++ b/consensus/types/src/fork_name.rs @@ -17,6 +17,7 @@ pub enum ForkName { Capella, Deneb, Electra, + Fulu, } impl ForkName { @@ -28,6 +29,7 @@ impl ForkName { ForkName::Capella, ForkName::Deneb, ForkName::Electra, + ForkName::Fulu, ] } @@ -38,6 +40,7 @@ impl ForkName { (ForkName::Capella, spec.capella_fork_epoch), (ForkName::Deneb, spec.deneb_fork_epoch), (ForkName::Electra, spec.electra_fork_epoch), + (ForkName::Fulu, spec.fulu_fork_epoch), ] } @@ -57,6 +60,7 @@ impl ForkName { spec.capella_fork_epoch = None; spec.deneb_fork_epoch = None; spec.electra_fork_epoch = None; + spec.fulu_fork_epoch = None; spec } ForkName::Altair => { @@ 
-65,6 +69,7 @@ impl ForkName { spec.capella_fork_epoch = None; spec.deneb_fork_epoch = None; spec.electra_fork_epoch = None; + spec.fulu_fork_epoch = None; spec } ForkName::Bellatrix => { @@ -73,6 +78,7 @@ impl ForkName { spec.capella_fork_epoch = None; spec.deneb_fork_epoch = None; spec.electra_fork_epoch = None; + spec.fulu_fork_epoch = None; spec } ForkName::Capella => { @@ -81,6 +87,7 @@ impl ForkName { spec.capella_fork_epoch = Some(Epoch::new(0)); spec.deneb_fork_epoch = None; spec.electra_fork_epoch = None; + spec.fulu_fork_epoch = None; spec } ForkName::Deneb => { @@ -89,6 +96,7 @@ impl ForkName { spec.capella_fork_epoch = Some(Epoch::new(0)); spec.deneb_fork_epoch = Some(Epoch::new(0)); spec.electra_fork_epoch = None; + spec.fulu_fork_epoch = None; spec } ForkName::Electra => { @@ -97,6 +105,16 @@ impl ForkName { spec.capella_fork_epoch = Some(Epoch::new(0)); spec.deneb_fork_epoch = Some(Epoch::new(0)); spec.electra_fork_epoch = Some(Epoch::new(0)); + spec.fulu_fork_epoch = None; + spec + } + ForkName::Fulu => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(Epoch::new(0)); + spec.electra_fork_epoch = Some(Epoch::new(0)); + spec.fulu_fork_epoch = Some(Epoch::new(0)); spec } } @@ -113,6 +131,7 @@ impl ForkName { ForkName::Capella => Some(ForkName::Bellatrix), ForkName::Deneb => Some(ForkName::Capella), ForkName::Electra => Some(ForkName::Deneb), + ForkName::Fulu => Some(ForkName::Electra), } } @@ -126,7 +145,8 @@ impl ForkName { ForkName::Bellatrix => Some(ForkName::Capella), ForkName::Capella => Some(ForkName::Deneb), ForkName::Deneb => Some(ForkName::Electra), - ForkName::Electra => None, + ForkName::Electra => Some(ForkName::Fulu), + ForkName::Fulu => None, } } @@ -149,6 +169,10 @@ impl ForkName { pub fn electra_enabled(self) -> bool { self >= ForkName::Electra } + + pub fn fulu_enabled(self) -> bool { + self >= 
ForkName::Fulu + } } /// Map a fork name into a fork-versioned superstruct type like `BeaconBlock`. @@ -200,6 +224,10 @@ macro_rules! map_fork_name_with { let (value, extra_data) = $body; ($t::Electra(value), extra_data) } + ForkName::Fulu => { + let (value, extra_data) = $body; + ($t::Fulu(value), extra_data) + } } }; } @@ -215,6 +243,7 @@ impl FromStr for ForkName { "capella" => ForkName::Capella, "deneb" => ForkName::Deneb, "electra" => ForkName::Electra, + "fulu" => ForkName::Fulu, _ => return Err(format!("unknown fork name: {}", fork_name)), }) } @@ -229,6 +258,7 @@ impl Display for ForkName { ForkName::Capella => "capella".fmt(f), ForkName::Deneb => "deneb".fmt(f), ForkName::Electra => "electra".fmt(f), + ForkName::Fulu => "fulu".fmt(f), } } } diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index dd304c6296..282f27a517 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -126,13 +126,13 @@ pub use crate::attester_slashing::{ }; pub use crate::beacon_block::{ BeaconBlock, BeaconBlockAltair, BeaconBlockBase, BeaconBlockBellatrix, BeaconBlockCapella, - BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockRef, BeaconBlockRefMut, BlindedBeaconBlock, - BlockImportSource, EmptyBlock, + BeaconBlockDeneb, BeaconBlockElectra, BeaconBlockFulu, BeaconBlockRef, BeaconBlockRefMut, + BlindedBeaconBlock, BlockImportSource, EmptyBlock, }; pub use crate::beacon_block_body::{ BeaconBlockBody, BeaconBlockBodyAltair, BeaconBlockBodyBase, BeaconBlockBodyBellatrix, - BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyRef, - BeaconBlockBodyRefMut, + BeaconBlockBodyCapella, BeaconBlockBodyDeneb, BeaconBlockBodyElectra, BeaconBlockBodyFulu, + BeaconBlockBodyRef, BeaconBlockBodyRefMut, }; pub use crate::beacon_block_header::BeaconBlockHeader; pub use crate::beacon_committee::{BeaconCommittee, OwnedBeaconCommittee}; @@ -142,7 +142,7 @@ pub use crate::bls_to_execution_change::BlsToExecutionChange; pub use 
crate::chain_spec::{ChainSpec, Config, Domain}; pub use crate::checkpoint::Checkpoint; pub use crate::config_and_preset::{ - ConfigAndPreset, ConfigAndPresetCapella, ConfigAndPresetDeneb, ConfigAndPresetElectra, + ConfigAndPreset, ConfigAndPresetDeneb, ConfigAndPresetElectra, ConfigAndPresetFulu, }; pub use crate::consolidation_request::ConsolidationRequest; pub use crate::contribution_and_proof::ContributionAndProof; @@ -163,12 +163,13 @@ pub use crate::execution_block_hash::ExecutionBlockHash; pub use crate::execution_block_header::{EncodableExecutionBlockHeader, ExecutionBlockHeader}; pub use crate::execution_payload::{ ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, ExecutionPayloadDeneb, - ExecutionPayloadElectra, ExecutionPayloadRef, Transaction, Transactions, Withdrawals, + ExecutionPayloadElectra, ExecutionPayloadFulu, ExecutionPayloadRef, Transaction, Transactions, + Withdrawals, }; pub use crate::execution_payload_header::{ ExecutionPayloadHeader, ExecutionPayloadHeaderBellatrix, ExecutionPayloadHeaderCapella, - ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderRef, - ExecutionPayloadHeaderRefMut, + ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, + ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; pub use crate::execution_requests::{ExecutionRequests, RequestPrefix}; pub use crate::fork::Fork; @@ -183,31 +184,33 @@ pub use crate::indexed_attestation::{ }; pub use crate::light_client_bootstrap::{ LightClientBootstrap, LightClientBootstrapAltair, LightClientBootstrapCapella, - LightClientBootstrapDeneb, LightClientBootstrapElectra, + LightClientBootstrapDeneb, LightClientBootstrapElectra, LightClientBootstrapFulu, }; pub use crate::light_client_finality_update::{ LightClientFinalityUpdate, LightClientFinalityUpdateAltair, LightClientFinalityUpdateCapella, LightClientFinalityUpdateDeneb, LightClientFinalityUpdateElectra, + LightClientFinalityUpdateFulu, }; 
pub use crate::light_client_header::{ LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, + LightClientHeaderElectra, LightClientHeaderFulu, }; pub use crate::light_client_optimistic_update::{ LightClientOptimisticUpdate, LightClientOptimisticUpdateAltair, LightClientOptimisticUpdateCapella, LightClientOptimisticUpdateDeneb, - LightClientOptimisticUpdateElectra, + LightClientOptimisticUpdateElectra, LightClientOptimisticUpdateFulu, }; pub use crate::light_client_update::{ Error as LightClientUpdateError, LightClientUpdate, LightClientUpdateAltair, - LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, MerkleProof, + LightClientUpdateCapella, LightClientUpdateDeneb, LightClientUpdateElectra, + LightClientUpdateFulu, MerkleProof, }; pub use crate::participation_flags::ParticipationFlags; pub use crate::payload::{ AbstractExecPayload, BlindedPayload, BlindedPayloadBellatrix, BlindedPayloadCapella, - BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadRef, BlockType, ExecPayload, - FullPayload, FullPayloadBellatrix, FullPayloadCapella, FullPayloadDeneb, FullPayloadElectra, - FullPayloadRef, OwnedExecPayload, + BlindedPayloadDeneb, BlindedPayloadElectra, BlindedPayloadFulu, BlindedPayloadRef, BlockType, + ExecPayload, FullPayload, FullPayloadBellatrix, FullPayloadCapella, FullPayloadDeneb, + FullPayloadElectra, FullPayloadFulu, FullPayloadRef, OwnedExecPayload, }; pub use crate::pending_attestation::PendingAttestation; pub use crate::pending_consolidation::PendingConsolidation; @@ -215,6 +218,7 @@ pub use crate::pending_deposit::PendingDeposit; pub use crate::pending_partial_withdrawal::PendingPartialWithdrawal; pub use crate::preset::{ AltairPreset, BasePreset, BellatrixPreset, CapellaPreset, DenebPreset, ElectraPreset, + FuluPreset, }; pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; @@ -229,7 +233,7 
@@ pub use crate::signed_beacon_block::{ ssz_tagged_signed_beacon_block, ssz_tagged_signed_beacon_block_arc, SignedBeaconBlock, SignedBeaconBlockAltair, SignedBeaconBlockBase, SignedBeaconBlockBellatrix, SignedBeaconBlockCapella, SignedBeaconBlockDeneb, SignedBeaconBlockElectra, - SignedBeaconBlockHash, SignedBlindedBeaconBlock, + SignedBeaconBlockFulu, SignedBeaconBlockHash, SignedBlindedBeaconBlock, }; pub use crate::signed_beacon_block_header::SignedBeaconBlockHeader; pub use crate::signed_bls_to_execution_change::SignedBlsToExecutionChange; diff --git a/consensus/types/src/light_client_bootstrap.rs b/consensus/types/src/light_client_bootstrap.rs index 21a7e5416f..aa0d8836d1 100644 --- a/consensus/types/src/light_client_bootstrap.rs +++ b/consensus/types/src/light_client_bootstrap.rs @@ -2,7 +2,7 @@ use crate::{ light_client_update::*, test_utils::TestRandom, BeaconState, ChainSpec, EthSpec, FixedVector, ForkName, ForkVersionDeserialize, Hash256, LightClientHeader, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, LightClientHeaderElectra, - SignedBlindedBeaconBlock, Slot, SyncCommittee, + LightClientHeaderFulu, SignedBlindedBeaconBlock, Slot, SyncCommittee, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -17,7 +17,7 @@ use tree_hash_derive::TreeHash; /// A LightClientBootstrap is the initializer we send over to light_client nodes /// that are trying to generate their basic storage when booting up. #[superstruct( - variants(Altair, Capella, Deneb, Electra), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -54,6 +54,8 @@ pub struct LightClientBootstrap { pub header: LightClientHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "header_electra"))] pub header: LightClientHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "header_fulu"))] + pub header: LightClientHeaderFulu, /// The `SyncCommittee` used in the requested period. 
pub current_sync_committee: Arc>, /// Merkle proof for sync committee @@ -63,7 +65,7 @@ pub struct LightClientBootstrap { )] pub current_sync_committee_branch: FixedVector, #[superstruct( - only(Electra), + only(Electra, Fulu), partial_getter(rename = "current_sync_committee_branch_electra") )] pub current_sync_committee_branch: FixedVector, @@ -79,6 +81,7 @@ impl LightClientBootstrap { Self::Capella(_) => func(ForkName::Capella), Self::Deneb(_) => func(ForkName::Deneb), Self::Electra(_) => func(ForkName::Electra), + Self::Fulu(_) => func(ForkName::Fulu), } } @@ -97,6 +100,7 @@ impl LightClientBootstrap { ForkName::Capella => Self::Capella(LightClientBootstrapCapella::from_ssz_bytes(bytes)?), ForkName::Deneb => Self::Deneb(LightClientBootstrapDeneb::from_ssz_bytes(bytes)?), ForkName::Electra => Self::Electra(LightClientBootstrapElectra::from_ssz_bytes(bytes)?), + ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu::from_ssz_bytes(bytes)?), ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientBootstrap decoding for {fork_name} not implemented" @@ -117,6 +121,7 @@ impl LightClientBootstrap { ForkName::Capella => as Encode>::ssz_fixed_len(), ForkName::Deneb => as Encode>::ssz_fixed_len(), ForkName::Electra => as Encode>::ssz_fixed_len(), + ForkName::Fulu => as Encode>::ssz_fixed_len(), }; fixed_len + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } @@ -152,6 +157,11 @@ impl LightClientBootstrap { current_sync_committee, current_sync_committee_branch: current_sync_committee_branch.into(), }), + ForkName::Fulu => Self::Fulu(LightClientBootstrapFulu { + header: LightClientHeaderFulu::block_to_light_client_header(block)?, + current_sync_committee, + current_sync_committee_branch: current_sync_committee_branch.into(), + }), }; Ok(light_client_bootstrap) @@ -192,6 +202,11 @@ impl LightClientBootstrap { current_sync_committee, current_sync_committee_branch: current_sync_committee_branch.into(), }), + ForkName::Fulu => 
Self::Fulu(LightClientBootstrapFulu { + header: LightClientHeaderFulu::block_to_light_client_header(block)?, + current_sync_committee, + current_sync_committee_branch: current_sync_committee_branch.into(), + }), }; Ok(light_client_bootstrap) @@ -241,4 +256,10 @@ mod tests { use crate::{LightClientBootstrapElectra, MainnetEthSpec}; ssz_tests!(LightClientBootstrapElectra); } + + #[cfg(test)] + mod fulu { + use crate::{LightClientBootstrapFulu, MainnetEthSpec}; + ssz_tests!(LightClientBootstrapFulu); + } } diff --git a/consensus/types/src/light_client_finality_update.rs b/consensus/types/src/light_client_finality_update.rs index ba2f2083cd..ee3b53c853 100644 --- a/consensus/types/src/light_client_finality_update.rs +++ b/consensus/types/src/light_client_finality_update.rs @@ -3,7 +3,7 @@ use crate::ChainSpec; use crate::{ light_client_update::*, test_utils::TestRandom, ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - LightClientHeaderElectra, SignedBlindedBeaconBlock, + LightClientHeaderElectra, LightClientHeaderFulu, SignedBlindedBeaconBlock, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ -16,7 +16,7 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; #[superstruct( - variants(Altair, Capella, Deneb, Electra), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -53,6 +53,8 @@ pub struct LightClientFinalityUpdate { pub attested_header: LightClientHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "attested_header_electra"))] pub attested_header: LightClientHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "attested_header_fulu"))] + pub attested_header: LightClientHeaderFulu, /// The last `BeaconBlockHeader` from the last attested finalized block (end of epoch). 
#[superstruct(only(Altair), partial_getter(rename = "finalized_header_altair"))] pub finalized_header: LightClientHeaderAltair, @@ -62,13 +64,18 @@ pub struct LightClientFinalityUpdate { pub finalized_header: LightClientHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "finalized_header_fulu"))] + pub finalized_header: LightClientHeaderFulu, /// Merkle proof attesting finalized header. #[superstruct( only(Altair, Capella, Deneb), partial_getter(rename = "finality_branch_altair") )] pub finality_branch: FixedVector, - #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + #[superstruct( + only(Electra, Fulu), + partial_getter(rename = "finality_branch_electra") + )] pub finality_branch: FixedVector, /// current sync aggregate pub sync_aggregate: SyncAggregate, @@ -135,6 +142,17 @@ impl LightClientFinalityUpdate { sync_aggregate, signature_slot, }), + ForkName::Fulu => Self::Fulu(LightClientFinalityUpdateFulu { + attested_header: LightClientHeaderFulu::block_to_light_client_header( + attested_block, + )?, + finalized_header: LightClientHeaderFulu::block_to_light_client_header( + finalized_block, + )?, + finality_branch: finality_branch.into(), + sync_aggregate, + signature_slot, + }), ForkName::Base => return Err(Error::AltairForkNotActive), }; @@ -151,6 +169,7 @@ impl LightClientFinalityUpdate { Self::Capella(_) => func(ForkName::Capella), Self::Deneb(_) => func(ForkName::Deneb), Self::Electra(_) => func(ForkName::Electra), + Self::Fulu(_) => func(ForkName::Fulu), } } @@ -173,6 +192,7 @@ impl LightClientFinalityUpdate { ForkName::Electra => { Self::Electra(LightClientFinalityUpdateElectra::from_ssz_bytes(bytes)?) 
} + ForkName::Fulu => Self::Fulu(LightClientFinalityUpdateFulu::from_ssz_bytes(bytes)?), ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientFinalityUpdate decoding for {fork_name} not implemented" @@ -193,6 +213,7 @@ impl LightClientFinalityUpdate { ForkName::Capella => as Encode>::ssz_fixed_len(), ForkName::Deneb => as Encode>::ssz_fixed_len(), ForkName::Electra => as Encode>::ssz_fixed_len(), + ForkName::Fulu => as Encode>::ssz_fixed_len(), }; // `2 *` because there are two headers in the update fixed_size + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) @@ -255,4 +276,10 @@ mod tests { use crate::{LightClientFinalityUpdateElectra, MainnetEthSpec}; ssz_tests!(LightClientFinalityUpdateElectra); } + + #[cfg(test)] + mod fulu { + use crate::{LightClientFinalityUpdateFulu, MainnetEthSpec}; + ssz_tests!(LightClientFinalityUpdateFulu); + } } diff --git a/consensus/types/src/light_client_header.rs b/consensus/types/src/light_client_header.rs index 6655e0a093..0be26a7036 100644 --- a/consensus/types/src/light_client_header.rs +++ b/consensus/types/src/light_client_header.rs @@ -4,7 +4,8 @@ use crate::ForkVersionDeserialize; use crate::{light_client_update::*, BeaconBlockBody}; use crate::{ test_utils::TestRandom, EthSpec, ExecutionPayloadHeaderCapella, ExecutionPayloadHeaderDeneb, - ExecutionPayloadHeaderElectra, FixedVector, Hash256, SignedBlindedBeaconBlock, + ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, FixedVector, Hash256, + SignedBlindedBeaconBlock, }; use crate::{BeaconBlockHeader, ExecutionPayloadHeader}; use derivative::Derivative; @@ -17,7 +18,7 @@ use test_random_derive::TestRandom; use tree_hash_derive::TreeHash; #[superstruct( - variants(Altair, Capella, Deneb, Electra), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -59,8 +60,10 @@ pub struct LightClientHeader { partial_getter(rename = "execution_payload_header_electra") )] pub execution: 
ExecutionPayloadHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_header_fulu"))] + pub execution: ExecutionPayloadHeaderFulu, - #[superstruct(only(Capella, Deneb, Electra))] + #[superstruct(only(Capella, Deneb, Electra, Fulu))] pub execution_branch: FixedVector, #[ssz(skip_serializing, skip_deserializing)] @@ -92,6 +95,9 @@ impl LightClientHeader { ForkName::Electra => LightClientHeader::Electra( LightClientHeaderElectra::block_to_light_client_header(block)?, ), + ForkName::Fulu => { + LightClientHeader::Fulu(LightClientHeaderFulu::block_to_light_client_header(block)?) + } }; Ok(header) } @@ -110,6 +116,9 @@ impl LightClientHeader { ForkName::Electra => { LightClientHeader::Electra(LightClientHeaderElectra::from_ssz_bytes(bytes)?) } + ForkName::Fulu => { + LightClientHeader::Fulu(LightClientHeaderFulu::from_ssz_bytes(bytes)?) + } ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientHeader decoding for {fork_name} not implemented" @@ -283,6 +292,48 @@ impl Default for LightClientHeaderElectra { } } +impl LightClientHeaderFulu { + pub fn block_to_light_client_header( + block: &SignedBlindedBeaconBlock, + ) -> Result { + let payload = block + .message() + .execution_payload()? + .execution_payload_fulu()?; + + let header = ExecutionPayloadHeaderFulu::from(payload); + let beacon_block_body = BeaconBlockBody::from( + block + .message() + .body_fulu() + .map_err(|_| Error::BeaconBlockBodyError)? 
+ .to_owned(), + ); + + let execution_branch = beacon_block_body + .to_ref() + .block_body_merkle_proof(EXECUTION_PAYLOAD_INDEX)?; + + Ok(LightClientHeaderFulu { + beacon: block.message().block_header(), + execution: header, + execution_branch: FixedVector::new(execution_branch)?, + _phantom_data: PhantomData, + }) + } +} + +impl Default for LightClientHeaderFulu { + fn default() -> Self { + Self { + beacon: BeaconBlockHeader::empty(), + execution: ExecutionPayloadHeaderFulu::default(), + execution_branch: FixedVector::default(), + _phantom_data: PhantomData, + } + } +} + impl ForkVersionDeserialize for LightClientHeader { fn deserialize_by_fork<'de, D: serde::Deserializer<'de>>( value: serde_json::value::Value, @@ -301,6 +352,9 @@ impl ForkVersionDeserialize for LightClientHeader { ForkName::Electra => serde_json::from_value(value) .map(|light_client_header| Self::Electra(light_client_header)) .map_err(serde::de::Error::custom), + ForkName::Fulu => serde_json::from_value(value) + .map(|light_client_header| Self::Fulu(light_client_header)) + .map_err(serde::de::Error::custom), ForkName::Base => Err(serde::de::Error::custom(format!( "LightClientHeader deserialization for {fork_name} not implemented" ))), diff --git a/consensus/types/src/light_client_optimistic_update.rs b/consensus/types/src/light_client_optimistic_update.rs index 209388af87..fcf357757b 100644 --- a/consensus/types/src/light_client_optimistic_update.rs +++ b/consensus/types/src/light_client_optimistic_update.rs @@ -2,7 +2,8 @@ use super::{EthSpec, ForkName, ForkVersionDeserialize, LightClientHeader, Slot, use crate::test_utils::TestRandom; use crate::{ light_client_update::*, ChainSpec, LightClientHeaderAltair, LightClientHeaderCapella, - LightClientHeaderDeneb, LightClientHeaderElectra, SignedBlindedBeaconBlock, + LightClientHeaderDeneb, LightClientHeaderElectra, LightClientHeaderFulu, + SignedBlindedBeaconBlock, }; use derivative::Derivative; use serde::{Deserialize, Deserializer, Serialize}; @@ 
-18,7 +19,7 @@ use tree_hash_derive::TreeHash; /// A LightClientOptimisticUpdate is the update we send on each slot, /// it is based off the current unfinalized epoch is verified only against BLS signature. #[superstruct( - variants(Altair, Capella, Deneb, Electra), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -55,6 +56,8 @@ pub struct LightClientOptimisticUpdate { pub attested_header: LightClientHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "attested_header_electra"))] pub attested_header: LightClientHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "attested_header_fulu"))] + pub attested_header: LightClientHeaderFulu, /// current sync aggregate pub sync_aggregate: SyncAggregate, /// Slot of the sync aggregated signature @@ -102,6 +105,13 @@ impl LightClientOptimisticUpdate { sync_aggregate, signature_slot, }), + ForkName::Fulu => Self::Fulu(LightClientOptimisticUpdateFulu { + attested_header: LightClientHeaderFulu::block_to_light_client_header( + attested_block, + )?, + sync_aggregate, + signature_slot, + }), ForkName::Base => return Err(Error::AltairForkNotActive), }; @@ -117,6 +127,7 @@ impl LightClientOptimisticUpdate { Self::Capella(_) => func(ForkName::Capella), Self::Deneb(_) => func(ForkName::Deneb), Self::Electra(_) => func(ForkName::Electra), + Self::Fulu(_) => func(ForkName::Fulu), } } @@ -155,6 +166,7 @@ impl LightClientOptimisticUpdate { ForkName::Electra => { Self::Electra(LightClientOptimisticUpdateElectra::from_ssz_bytes(bytes)?) 
} + ForkName::Fulu => Self::Fulu(LightClientOptimisticUpdateFulu::from_ssz_bytes(bytes)?), ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientOptimisticUpdate decoding for {fork_name} not implemented" @@ -175,6 +187,7 @@ impl LightClientOptimisticUpdate { ForkName::Capella => as Encode>::ssz_fixed_len(), ForkName::Deneb => as Encode>::ssz_fixed_len(), ForkName::Electra => as Encode>::ssz_fixed_len(), + ForkName::Fulu => as Encode>::ssz_fixed_len(), }; fixed_len + LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } @@ -238,4 +251,10 @@ mod tests { use crate::{LightClientOptimisticUpdateElectra, MainnetEthSpec}; ssz_tests!(LightClientOptimisticUpdateElectra); } + + #[cfg(test)] + mod fulu { + use crate::{LightClientOptimisticUpdateFulu, MainnetEthSpec}; + ssz_tests!(LightClientOptimisticUpdateFulu); + } } diff --git a/consensus/types/src/light_client_update.rs b/consensus/types/src/light_client_update.rs index c3a50e71c1..0dd91edc3c 100644 --- a/consensus/types/src/light_client_update.rs +++ b/consensus/types/src/light_client_update.rs @@ -4,7 +4,7 @@ use crate::LightClientHeader; use crate::{ beacon_state, test_utils::TestRandom, ChainSpec, Epoch, ForkName, ForkVersionDeserialize, LightClientHeaderAltair, LightClientHeaderCapella, LightClientHeaderDeneb, - SignedBlindedBeaconBlock, + LightClientHeaderFulu, SignedBlindedBeaconBlock, }; use derivative::Derivative; use safe_arith::ArithError; @@ -100,7 +100,7 @@ impl From for Error { /// or to sync up to the last committee period, we need to have one ready for each ALTAIR period /// we go over, note: there is no need to keep all of the updates from [ALTAIR_PERIOD, CURRENT_PERIOD]. 
#[superstruct( - variants(Altair, Capella, Deneb, Electra), + variants(Altair, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -137,6 +137,8 @@ pub struct LightClientUpdate { pub attested_header: LightClientHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "attested_header_electra"))] pub attested_header: LightClientHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "attested_header_fulu"))] + pub attested_header: LightClientHeaderFulu, /// The `SyncCommittee` used in the next period. pub next_sync_committee: Arc>, // Merkle proof for next sync committee @@ -146,7 +148,7 @@ pub struct LightClientUpdate { )] pub next_sync_committee_branch: NextSyncCommitteeBranch, #[superstruct( - only(Electra), + only(Electra, Fulu), partial_getter(rename = "next_sync_committee_branch_electra") )] pub next_sync_committee_branch: NextSyncCommitteeBranchElectra, @@ -159,13 +161,18 @@ pub struct LightClientUpdate { pub finalized_header: LightClientHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "finalized_header_electra"))] pub finalized_header: LightClientHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "finalized_header_fulu"))] + pub finalized_header: LightClientHeaderFulu, /// Merkle proof attesting finalized header. 
#[superstruct( only(Altair, Capella, Deneb), partial_getter(rename = "finality_branch_altair") )] pub finality_branch: FinalityBranch, - #[superstruct(only(Electra), partial_getter(rename = "finality_branch_electra"))] + #[superstruct( + only(Electra, Fulu), + partial_getter(rename = "finality_branch_electra") + )] pub finality_branch: FinalityBranchElectra, /// current sync aggreggate pub sync_aggregate: SyncAggregate, @@ -285,6 +292,26 @@ impl LightClientUpdate { sync_aggregate: sync_aggregate.clone(), signature_slot: block_slot, }) + } + ForkName::Fulu => { + let attested_header = + LightClientHeaderFulu::block_to_light_client_header(attested_block)?; + + let finalized_header = if let Some(finalized_block) = finalized_block { + LightClientHeaderFulu::block_to_light_client_header(finalized_block)? + } else { + LightClientHeaderFulu::default() + }; + + Self::Fulu(LightClientUpdateFulu { + attested_header, + next_sync_committee, + next_sync_committee_branch: next_sync_committee_branch.into(), + finalized_header, + finality_branch: finality_branch.into(), + sync_aggregate: sync_aggregate.clone(), + signature_slot: block_slot, + }) } // To add a new fork, just append the new fork variant on the latest fork. 
Forks that // have a distinct execution header will need a new LightClientUpdate variant only // if you need to test or support lightclient usages @@ -301,6 +328,7 @@ impl LightClientUpdate { ForkName::Capella => Self::Capella(LightClientUpdateCapella::from_ssz_bytes(bytes)?), ForkName::Deneb => Self::Deneb(LightClientUpdateDeneb::from_ssz_bytes(bytes)?), ForkName::Electra => Self::Electra(LightClientUpdateElectra::from_ssz_bytes(bytes)?), + ForkName::Fulu => Self::Fulu(LightClientUpdateFulu::from_ssz_bytes(bytes)?), ForkName::Base => { return Err(ssz::DecodeError::BytesInvalid(format!( "LightClientUpdate decoding for {fork_name} not implemented" @@ -317,6 +345,7 @@ impl LightClientUpdate { LightClientUpdate::Capella(update) => update.attested_header.beacon.slot, LightClientUpdate::Deneb(update) => update.attested_header.beacon.slot, LightClientUpdate::Electra(update) => update.attested_header.beacon.slot, + LightClientUpdate::Fulu(update) => update.attested_header.beacon.slot, } } @@ -326,6 +355,7 @@ impl LightClientUpdate { LightClientUpdate::Capella(update) => update.finalized_header.beacon.slot, LightClientUpdate::Deneb(update) => update.finalized_header.beacon.slot, LightClientUpdate::Electra(update) => update.finalized_header.beacon.slot, + LightClientUpdate::Fulu(update) => update.finalized_header.beacon.slot, } } @@ -445,6 +475,7 @@ impl LightClientUpdate { ForkName::Capella => as Encode>::ssz_fixed_len(), ForkName::Deneb => as Encode>::ssz_fixed_len(), ForkName::Electra => as Encode>::ssz_fixed_len(), + ForkName::Fulu => as Encode>::ssz_fixed_len(), }; fixed_len + 2 * LightClientHeader::::ssz_max_var_len_for_fork(fork_name) } @@ -458,6 +489,7 @@ impl LightClientUpdate { Self::Capella(_) => func(ForkName::Capella), Self::Deneb(_) => func(ForkName::Deneb), Self::Electra(_) => func(ForkName::Electra), + Self::Fulu(_) => func(ForkName::Fulu), } } } @@ -513,6 +545,13 @@ mod tests { ssz_tests!(LightClientUpdateElectra); } + #[cfg(test)] + mod fulu { + use 
super::*; + use crate::MainnetEthSpec; + ssz_tests!(LightClientUpdateFulu); + } + #[test] fn finalized_root_params() { assert!(2usize.pow(FINALIZED_ROOT_PROOF_LEN as u32) <= FINALIZED_ROOT_INDEX); diff --git a/consensus/types/src/payload.rs b/consensus/types/src/payload.rs index e68801840a..abc9afd34c 100644 --- a/consensus/types/src/payload.rs +++ b/consensus/types/src/payload.rs @@ -84,13 +84,15 @@ pub trait AbstractExecPayload: + TryInto + TryInto + TryInto + + TryInto { type Ref<'a>: ExecPayload + Copy + From<&'a Self::Bellatrix> + From<&'a Self::Capella> + From<&'a Self::Deneb> - + From<&'a Self::Electra>; + + From<&'a Self::Electra> + + From<&'a Self::Fulu>; type Bellatrix: OwnedExecPayload + Into @@ -108,10 +110,14 @@ pub trait AbstractExecPayload: + Into + for<'a> From>> + TryFrom>; + type Fulu: OwnedExecPayload + + Into + + for<'a> From>> + + TryFrom>; } #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -157,6 +163,8 @@ pub struct FullPayload { pub execution_payload: ExecutionPayloadDeneb, #[superstruct(only(Electra), partial_getter(rename = "execution_payload_electra"))] pub execution_payload: ExecutionPayloadElectra, + #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] + pub execution_payload: ExecutionPayloadFulu, } impl From> for ExecutionPayload { @@ -273,6 +281,9 @@ impl ExecPayload for FullPayload { FullPayload::Electra(ref inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } + FullPayload::Fulu(ref inner) => { + Ok(inner.execution_payload.withdrawals.tree_hash_root()) + } } } @@ -283,6 +294,7 @@ impl ExecPayload for FullPayload { } FullPayload::Deneb(ref inner) => Ok(inner.execution_payload.blob_gas_used), FullPayload::Electra(ref inner) => Ok(inner.execution_payload.blob_gas_used), + FullPayload::Fulu(ref inner) => Ok(inner.execution_payload.blob_gas_used), } } @@ -313,6 +325,7 @@ impl FullPayload { 
ForkName::Capella => Ok(FullPayloadCapella::default().into()), ForkName::Deneb => Ok(FullPayloadDeneb::default().into()), ForkName::Electra => Ok(FullPayloadElectra::default().into()), + ForkName::Fulu => Ok(FullPayloadFulu::default().into()), } } } @@ -412,6 +425,7 @@ impl ExecPayload for FullPayloadRef<'_, E> { FullPayloadRef::Electra(inner) => { Ok(inner.execution_payload.withdrawals.tree_hash_root()) } + FullPayloadRef::Fulu(inner) => Ok(inner.execution_payload.withdrawals.tree_hash_root()), } } @@ -422,6 +436,7 @@ impl ExecPayload for FullPayloadRef<'_, E> { } FullPayloadRef::Deneb(inner) => Ok(inner.execution_payload.blob_gas_used), FullPayloadRef::Electra(inner) => Ok(inner.execution_payload.blob_gas_used), + FullPayloadRef::Fulu(inner) => Ok(inner.execution_payload.blob_gas_used), } } @@ -444,6 +459,7 @@ impl AbstractExecPayload for FullPayload { type Capella = FullPayloadCapella; type Deneb = FullPayloadDeneb; type Electra = FullPayloadElectra; + type Fulu = FullPayloadFulu; } impl From> for FullPayload { @@ -462,7 +478,7 @@ impl TryFrom> for FullPayload { } #[superstruct( - variants(Bellatrix, Capella, Deneb, Electra), + variants(Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -507,6 +523,8 @@ pub struct BlindedPayload { pub execution_payload_header: ExecutionPayloadHeaderDeneb, #[superstruct(only(Electra), partial_getter(rename = "execution_payload_electra"))] pub execution_payload_header: ExecutionPayloadHeaderElectra, + #[superstruct(only(Fulu), partial_getter(rename = "execution_payload_fulu"))] + pub execution_payload_header: ExecutionPayloadHeaderFulu, } impl<'a, E: EthSpec> From> for BlindedPayload { @@ -599,6 +617,7 @@ impl ExecPayload for BlindedPayload { BlindedPayload::Electra(ref inner) => { Ok(inner.execution_payload_header.withdrawals_root) } + BlindedPayload::Fulu(ref inner) => Ok(inner.execution_payload_header.withdrawals_root), } } @@ -609,6 +628,7 @@ impl ExecPayload for BlindedPayload { } 
BlindedPayload::Deneb(ref inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayload::Electra(ref inner) => Ok(inner.execution_payload_header.blob_gas_used), + BlindedPayload::Fulu(ref inner) => Ok(inner.execution_payload_header.blob_gas_used), } } @@ -707,6 +727,7 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { BlindedPayloadRef::Electra(inner) => { Ok(inner.execution_payload_header.withdrawals_root) } + BlindedPayloadRef::Fulu(inner) => Ok(inner.execution_payload_header.withdrawals_root), } } @@ -717,6 +738,7 @@ impl<'b, E: EthSpec> ExecPayload for BlindedPayloadRef<'b, E> { } BlindedPayloadRef::Deneb(inner) => Ok(inner.execution_payload_header.blob_gas_used), BlindedPayloadRef::Electra(inner) => Ok(inner.execution_payload_header.blob_gas_used), + BlindedPayloadRef::Fulu(inner) => Ok(inner.execution_payload_header.blob_gas_used), } } @@ -1020,6 +1042,13 @@ impl_exec_payload_for_fork!( ExecutionPayloadElectra, Electra ); +impl_exec_payload_for_fork!( + BlindedPayloadFulu, + FullPayloadFulu, + ExecutionPayloadHeaderFulu, + ExecutionPayloadFulu, + Fulu +); impl AbstractExecPayload for BlindedPayload { type Ref<'a> = BlindedPayloadRef<'a, E>; @@ -1027,6 +1056,7 @@ impl AbstractExecPayload for BlindedPayload { type Capella = BlindedPayloadCapella; type Deneb = BlindedPayloadDeneb; type Electra = BlindedPayloadElectra; + type Fulu = BlindedPayloadFulu; } impl From> for BlindedPayload { @@ -1063,6 +1093,11 @@ impl From> for BlindedPayload { execution_payload_header, }) } + ExecutionPayloadHeader::Fulu(execution_payload_header) => { + Self::Fulu(BlindedPayloadFulu { + execution_payload_header, + }) + } } } } @@ -1082,6 +1117,9 @@ impl From> for ExecutionPayloadHeader { BlindedPayload::Electra(blinded_payload) => { ExecutionPayloadHeader::Electra(blinded_payload.execution_payload_header) } + BlindedPayload::Fulu(blinded_payload) => { + ExecutionPayloadHeader::Fulu(blinded_payload.execution_payload_header) + } } } } diff --git 
a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index b469b7b777..f8b3665409 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -276,6 +276,21 @@ impl ElectraPreset { } } +#[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] +#[serde(rename_all = "UPPERCASE")] +pub struct FuluPreset { + #[serde(with = "serde_utils::quoted_u64")] + pub fulu_placeholder: u64, +} + +impl FuluPreset { + pub fn from_chain_spec(spec: &ChainSpec) -> Self { + Self { + fulu_placeholder: spec.fulu_placeholder, + } + } +} + #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct Eip7594Preset { @@ -343,6 +358,9 @@ mod test { let electra: ElectraPreset = preset_from_file(&preset_name, "electra.yaml"); assert_eq!(electra, ElectraPreset::from_chain_spec::(&spec)); + let fulu: FuluPreset = preset_from_file(&preset_name, "fulu.yaml"); + assert_eq!(fulu, FuluPreset::from_chain_spec::(&spec)); + let eip7594: Eip7594Preset = preset_from_file(&preset_name, "eip7594.yaml"); assert_eq!(eip7594, Eip7594Preset::from_chain_spec::(&spec)); } diff --git a/consensus/types/src/signed_beacon_block.rs b/consensus/types/src/signed_beacon_block.rs index bb5e1ea34b..d9bf9bf55d 100644 --- a/consensus/types/src/signed_beacon_block.rs +++ b/consensus/types/src/signed_beacon_block.rs @@ -38,7 +38,7 @@ impl From for Hash256 { /// A `BeaconBlock` and a signature from its proposer. 
#[superstruct( - variants(Base, Altair, Bellatrix, Capella, Deneb, Electra), + variants(Base, Altair, Bellatrix, Capella, Deneb, Electra, Fulu), variant_attributes( derive( Debug, @@ -81,6 +81,8 @@ pub struct SignedBeaconBlock = FullP pub message: BeaconBlockDeneb, #[superstruct(only(Electra), partial_getter(rename = "message_electra"))] pub message: BeaconBlockElectra, + #[superstruct(only(Fulu), partial_getter(rename = "message_fulu"))] + pub message: BeaconBlockFulu, pub signature: Signature, } @@ -163,6 +165,9 @@ impl> SignedBeaconBlock BeaconBlock::Electra(message) => { SignedBeaconBlock::Electra(SignedBeaconBlockElectra { message, signature }) } + BeaconBlock::Fulu(message) => { + SignedBeaconBlock::Fulu(SignedBeaconBlockFulu { message, signature }) + } } } @@ -570,6 +575,64 @@ impl SignedBeaconBlockElectra> { } } +impl SignedBeaconBlockFulu> { + pub fn into_full_block( + self, + execution_payload: ExecutionPayloadFulu, + ) -> SignedBeaconBlockFulu> { + let SignedBeaconBlockFulu { + message: + BeaconBlockFulu { + slot, + proposer_index, + parent_root, + state_root, + body: + BeaconBlockBodyFulu { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: BlindedPayloadFulu { .. 
}, + bls_to_execution_changes, + blob_kzg_commitments, + execution_requests, + }, + }, + signature, + } = self; + SignedBeaconBlockFulu { + message: BeaconBlockFulu { + slot, + proposer_index, + parent_root, + state_root, + body: BeaconBlockBodyFulu { + randao_reveal, + eth1_data, + graffiti, + proposer_slashings, + attester_slashings, + attestations, + deposits, + voluntary_exits, + sync_aggregate, + execution_payload: FullPayloadFulu { execution_payload }, + bls_to_execution_changes, + blob_kzg_commitments, + execution_requests, + }, + }, + signature, + } + } +} + impl SignedBeaconBlock> { pub fn try_into_full_block( self, @@ -590,12 +653,16 @@ impl SignedBeaconBlock> { (SignedBeaconBlock::Electra(block), Some(ExecutionPayload::Electra(payload))) => { SignedBeaconBlock::Electra(block.into_full_block(payload)) } + (SignedBeaconBlock::Fulu(block), Some(ExecutionPayload::Fulu(payload))) => { + SignedBeaconBlock::Fulu(block.into_full_block(payload)) + } // avoid wildcard matching forks so that compiler will // direct us here when a new fork has been added (SignedBeaconBlock::Bellatrix(_), _) => return None, (SignedBeaconBlock::Capella(_), _) => return None, (SignedBeaconBlock::Deneb(_), _) => return None, (SignedBeaconBlock::Electra(_), _) => return None, + (SignedBeaconBlock::Fulu(_), _) => return None, }; Some(full_block) } @@ -741,6 +808,9 @@ pub mod ssz_tagged_signed_beacon_block { ForkName::Electra => Ok(SignedBeaconBlock::Electra( SignedBeaconBlockElectra::from_ssz_bytes(body)?, )), + ForkName::Fulu => Ok(SignedBeaconBlock::Fulu( + SignedBeaconBlockFulu::from_ssz_bytes(body)?, + )), } } } @@ -841,8 +911,9 @@ mod test { ), SignedBeaconBlock::from_block( BeaconBlock::Electra(BeaconBlockElectra::empty(spec)), - sig, + sig.clone(), ), + SignedBeaconBlock::from_block(BeaconBlock::Fulu(BeaconBlockFulu::empty(spec)), sig), ]; for block in blocks { diff --git a/lcli/src/mock_el.rs b/lcli/src/mock_el.rs index 8d3220b1df..7719f02aa3 100644 --- a/lcli/src/mock_el.rs +++ 
b/lcli/src/mock_el.rs @@ -19,6 +19,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< let shanghai_time = parse_required(matches, "shanghai-time")?; let cancun_time = parse_optional(matches, "cancun-time")?; let prague_time = parse_optional(matches, "prague-time")?; + let osaka_time = parse_optional(matches, "osaka-time")?; let handle = env.core_context().executor.handle().unwrap(); let spec = &E::default_spec(); @@ -37,6 +38,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< shanghai_time: Some(shanghai_time), cancun_time, prague_time, + osaka_time, }; let kzg = None; let server: MockServer = MockServer::new_with_config(&handle, config, kzg); diff --git a/testing/ef_tests/src/cases/common.rs b/testing/ef_tests/src/cases/common.rs index e16f5b257f..62f834820f 100644 --- a/testing/ef_tests/src/cases/common.rs +++ b/testing/ef_tests/src/cases/common.rs @@ -66,6 +66,7 @@ pub fn previous_fork(fork_name: ForkName) -> ForkName { ForkName::Capella => ForkName::Bellatrix, ForkName::Deneb => ForkName::Capella, ForkName::Electra => ForkName::Deneb, + ForkName::Fulu => ForkName::Electra, } } diff --git a/testing/ef_tests/src/cases/epoch_processing.rs b/testing/ef_tests/src/cases/epoch_processing.rs index c1adf10770..e05225c171 100644 --- a/testing/ef_tests/src/cases/epoch_processing.rs +++ b/testing/ef_tests/src/cases/epoch_processing.rs @@ -100,47 +100,35 @@ type_name!(ParticipationFlagUpdates, "participation_flag_updates"); impl EpochTransition for JustificationAndFinalization { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Base(_) => { - let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; - validator_statuses.process_attestations(state)?; - let justification_and_finalization_state = - base::process_justification_and_finalization( - state, - &validator_statuses.total_balances, - spec, - )?; - 
justification_and_finalization_state.apply_changes_to_state(state); - Ok(()) - } - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => { - initialize_progressive_balances_cache(state, spec)?; - let justification_and_finalization_state = - altair::process_justification_and_finalization(state)?; - justification_and_finalization_state.apply_changes_to_state(state); - Ok(()) - } + if state.fork_name_unchecked().altair_enabled() { + initialize_progressive_balances_cache(state, spec)?; + let justification_and_finalization_state = + altair::process_justification_and_finalization(state)?; + justification_and_finalization_state.apply_changes_to_state(state); + Ok(()) + } else { + let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(state)?; + let justification_and_finalization_state = + base::process_justification_and_finalization( + state, + &validator_statuses.total_balances, + spec, + )?; + justification_and_finalization_state.apply_changes_to_state(state); + Ok(()) } } } impl EpochTransition for RewardsAndPenalties { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Base(_) => { - let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; - validator_statuses.process_attestations(state)?; - base::process_rewards_and_penalties(state, &validator_statuses, spec) - } - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => altair::process_rewards_and_penalties_slow(state, spec), + if state.fork_name_unchecked().altair_enabled() { + altair::process_rewards_and_penalties_slow(state, spec) + } else { + let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(state)?; + base::process_rewards_and_penalties(state, 
&validator_statuses, spec) } } } @@ -159,24 +147,17 @@ impl EpochTransition for RegistryUpdates { impl EpochTransition for Slashings { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Base(_) => { - let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; - validator_statuses.process_attestations(state)?; - process_slashings( - state, - validator_statuses.total_balances.current_epoch(), - spec, - )?; - } - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => { - process_slashings_slow(state, spec)?; - } - }; + if state.fork_name_unchecked().altair_enabled() { + process_slashings_slow(state, spec)?; + } else { + let mut validator_statuses = base::ValidatorStatuses::new(state, spec)?; + validator_statuses.process_attestations(state)?; + process_slashings( + state, + validator_statuses.total_balances.current_epoch(), + spec, + )?; + } Ok(()) } } @@ -251,11 +232,10 @@ impl EpochTransition for HistoricalRootsUpdate { impl EpochTransition for HistoricalSummariesUpdate { fn run(state: &mut BeaconState, _spec: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Capella(_) | BeaconState::Deneb(_) | BeaconState::Electra(_) => { - process_historical_summaries_update(state) - } - _ => Ok(()), + if state.fork_name_unchecked().capella_enabled() { + process_historical_summaries_update(state) + } else { + Ok(()) } } } @@ -272,39 +252,30 @@ impl EpochTransition for ParticipationRecordUpdates { impl EpochTransition for SyncCommitteeUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => altair::process_sync_committee_updates(state, spec), + if 
state.fork_name_unchecked().altair_enabled() { + altair::process_sync_committee_updates(state, spec) + } else { + Ok(()) } } } impl EpochTransition for InactivityUpdates { fn run(state: &mut BeaconState, spec: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => altair::process_inactivity_updates_slow(state, spec), + if state.fork_name_unchecked().altair_enabled() { + altair::process_inactivity_updates_slow(state, spec) + } else { + Ok(()) } } } impl EpochTransition for ParticipationFlagUpdates { fn run(state: &mut BeaconState, _: &ChainSpec) -> Result<(), EpochProcessingError> { - match state { - BeaconState::Base(_) => Ok(()), - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => altair::process_participation_flag_updates(state), + if state.fork_name_unchecked().altair_enabled() { + altair::process_participation_flag_updates(state) + } else { + Ok(()) } } } diff --git a/testing/ef_tests/src/cases/fork.rs b/testing/ef_tests/src/cases/fork.rs index 132cfb4c0a..85301e22f6 100644 --- a/testing/ef_tests/src/cases/fork.rs +++ b/testing/ef_tests/src/cases/fork.rs @@ -5,7 +5,7 @@ use crate::decode::{ssz_decode_state, yaml_decode_file}; use serde::Deserialize; use state_processing::upgrade::{ upgrade_to_altair, upgrade_to_bellatrix, upgrade_to_capella, upgrade_to_deneb, - upgrade_to_electra, + upgrade_to_electra, upgrade_to_fulu, }; use types::BeaconState; @@ -69,6 +69,7 @@ impl Case for ForkTest { ForkName::Capella => upgrade_to_capella(&mut result_state, spec).map(|_| result_state), ForkName::Deneb => upgrade_to_deneb(&mut result_state, spec).map(|_| result_state), ForkName::Electra => upgrade_to_electra(&mut result_state, spec).map(|_| result_state), + ForkName::Fulu => upgrade_to_fulu(&mut 
result_state, spec).map(|_| result_state), }; compare_beacon_state_results_without_caches(&mut result, &mut expected) diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index 49c0719784..109d2cc796 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -4,7 +4,7 @@ use serde::Deserialize; use tree_hash::Hash256; use types::{ light_client_update, BeaconBlockBody, BeaconBlockBodyCapella, BeaconBlockBodyDeneb, - BeaconBlockBodyElectra, BeaconState, FixedVector, FullPayload, Unsigned, + BeaconBlockBodyElectra, BeaconBlockBodyFulu, BeaconState, FixedVector, FullPayload, Unsigned, }; #[derive(Debug, Clone, Deserialize)] @@ -131,6 +131,9 @@ impl LoadCase for KzgInclusionMerkleProofValidity { ssz_decode_file::>(&path.join("object.ssz_snappy"))? .into() } + ForkName::Fulu => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))?.into() + } }; let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; // Metadata does not exist in these tests but it is left like this just in case. @@ -246,6 +249,9 @@ impl LoadCase for BeaconBlockBodyMerkleProofValidity { ssz_decode_file::>(&path.join("object.ssz_snappy"))? .into() } + ForkName::Fulu => { + ssz_decode_file::>(&path.join("object.ssz_snappy"))?.into() + } }; let merkle_proof = yaml_decode_file(&path.join("proof.yaml"))?; // Metadata does not exist in these tests but it is left like this just in case. 
diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs index d8cade296b..adb5bee768 100644 --- a/testing/ef_tests/src/cases/operations.rs +++ b/testing/ef_tests/src/cases/operations.rs @@ -98,29 +98,24 @@ impl Operation for Attestation { ) -> Result<(), BlockProcessingError> { initialize_epoch_cache(state, spec)?; let mut ctxt = ConsensusContext::new(state.slot()); - match state { - BeaconState::Base(_) => base::process_attestations( + if state.fork_name_unchecked().altair_enabled() { + initialize_progressive_balances_cache(state, spec)?; + altair_deneb::process_attestation( + state, + self.to_ref(), + 0, + &mut ctxt, + VerifySignatures::True, + spec, + ) + } else { + base::process_attestations( state, [self.clone().to_ref()].into_iter(), VerifySignatures::True, &mut ctxt, spec, - ), - BeaconState::Altair(_) - | BeaconState::Bellatrix(_) - | BeaconState::Capella(_) - | BeaconState::Deneb(_) - | BeaconState::Electra(_) => { - initialize_progressive_balances_cache(state, spec)?; - altair_deneb::process_attestation( - state, - self.to_ref(), - 0, - &mut ctxt, - VerifySignatures::True, - spec, - ) - } + ) } } } @@ -131,14 +126,11 @@ impl Operation for AttesterSlashing { } fn decode(path: &Path, fork_name: ForkName, _spec: &ChainSpec) -> Result { - Ok(match fork_name { - ForkName::Base - | ForkName::Altair - | ForkName::Bellatrix - | ForkName::Capella - | ForkName::Deneb => Self::Base(ssz_decode_file(path)?), - ForkName::Electra => Self::Electra(ssz_decode_file(path)?), - }) + if fork_name.electra_enabled() { + Ok(Self::Electra(ssz_decode_file(path)?)) + } else { + Ok(Self::Base(ssz_decode_file(path)?)) + } } fn apply_to( @@ -308,6 +300,7 @@ impl Operation for BeaconBlockBody> { ForkName::Capella => BeaconBlockBody::Capella(<_>::from_ssz_bytes(bytes)?), ForkName::Deneb => BeaconBlockBody::Deneb(<_>::from_ssz_bytes(bytes)?), ForkName::Electra => BeaconBlockBody::Electra(<_>::from_ssz_bytes(bytes)?), + ForkName::Fulu => 
BeaconBlockBody::Fulu(<_>::from_ssz_bytes(bytes)?), _ => panic!(), }) }) @@ -363,6 +356,10 @@ impl Operation for BeaconBlockBody> { let inner = >>::from_ssz_bytes(bytes)?; BeaconBlockBody::Electra(inner.clone_as_blinded()) } + ForkName::Fulu => { + let inner = >>::from_ssz_bytes(bytes)?; + BeaconBlockBody::Electra(inner.clone_as_blinded()) + } _ => panic!(), }) }) diff --git a/testing/ef_tests/src/cases/transition.rs b/testing/ef_tests/src/cases/transition.rs index dc5029d53e..6d037dae87 100644 --- a/testing/ef_tests/src/cases/transition.rs +++ b/testing/ef_tests/src/cases/transition.rs @@ -60,6 +60,14 @@ impl LoadCase for TransitionTest { spec.deneb_fork_epoch = Some(Epoch::new(0)); spec.electra_fork_epoch = Some(metadata.fork_epoch); } + ForkName::Fulu => { + spec.altair_fork_epoch = Some(Epoch::new(0)); + spec.bellatrix_fork_epoch = Some(Epoch::new(0)); + spec.capella_fork_epoch = Some(Epoch::new(0)); + spec.deneb_fork_epoch = Some(Epoch::new(0)); + spec.electra_fork_epoch = Some(Epoch::new(0)); + spec.fulu_fork_epoch = Some(metadata.fork_epoch); + } } // Load blocks diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index f4a09de32c..e7c148645c 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -24,7 +24,7 @@ pub trait Handler { // Add forks here to exclude them from EF spec testing. Helpful for adding future or // unspecified forks. 
fn disabled_forks(&self) -> Vec { - vec![] + vec![ForkName::Fulu] } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { @@ -287,6 +287,10 @@ impl SszStaticHandler { Self::for_forks(vec![ForkName::Electra]) } + pub fn fulu_only() -> Self { + Self::for_forks(vec![ForkName::Fulu]) + } + pub fn altair_and_later() -> Self { Self::for_forks(ForkName::list_all()[1..].to_vec()) } @@ -307,6 +311,10 @@ impl SszStaticHandler { Self::for_forks(ForkName::list_all()[5..].to_vec()) } + pub fn fulu_and_later() -> Self { + Self::for_forks(ForkName::list_all()[6..].to_vec()) + } + pub fn pre_electra() -> Self { Self::for_forks(ForkName::list_all()[0..5].to_vec()) } diff --git a/testing/simulator/src/basic_sim.rs b/testing/simulator/src/basic_sim.rs index 8f659a893f..82a7028582 100644 --- a/testing/simulator/src/basic_sim.rs +++ b/testing/simulator/src/basic_sim.rs @@ -22,7 +22,8 @@ const ALTAIR_FORK_EPOCH: u64 = 0; const BELLATRIX_FORK_EPOCH: u64 = 0; const CAPELLA_FORK_EPOCH: u64 = 1; const DENEB_FORK_EPOCH: u64 = 2; -//const ELECTRA_FORK_EPOCH: u64 = 3; +// const ELECTRA_FORK_EPOCH: u64 = 3; +// const FULU_FORK_EPOCH: u64 = 4; const SUGGESTED_FEE_RECIPIENT: [u8; 20] = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]; @@ -118,6 +119,7 @@ pub fn run_basic_sim(matches: &ArgMatches) -> Result<(), String> { spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH)); spec.deneb_fork_epoch = Some(Epoch::new(DENEB_FORK_EPOCH)); //spec.electra_fork_epoch = Some(Epoch::new(ELECTRA_FORK_EPOCH)); + //spec.fulu_fork_epoch = Some(Epoch::new(FULU_FORK_EPOCH)); let spec = Arc::new(spec); env.eth2_config.spec = spec.clone(); diff --git a/testing/simulator/src/fallback_sim.rs b/testing/simulator/src/fallback_sim.rs index b3b9a46001..7d4bdfa264 100644 --- a/testing/simulator/src/fallback_sim.rs +++ b/testing/simulator/src/fallback_sim.rs @@ -21,7 +21,8 @@ const ALTAIR_FORK_EPOCH: u64 = 0; const BELLATRIX_FORK_EPOCH: u64 = 0; const CAPELLA_FORK_EPOCH: u64 = 1; const 
DENEB_FORK_EPOCH: u64 = 2; -//const ELECTRA_FORK_EPOCH: u64 = 3; +// const ELECTRA_FORK_EPOCH: u64 = 3; +// const FULU_FORK_EPOCH: u64 = 4; // Since simulator tests are non-deterministic and there is a non-zero chance of missed // attestations, define an acceptable network-wide attestation performance. @@ -123,6 +124,7 @@ pub fn run_fallback_sim(matches: &ArgMatches) -> Result<(), String> { spec.capella_fork_epoch = Some(Epoch::new(CAPELLA_FORK_EPOCH)); spec.deneb_fork_epoch = Some(Epoch::new(DENEB_FORK_EPOCH)); //spec.electra_fork_epoch = Some(Epoch::new(ELECTRA_FORK_EPOCH)); + //spec.fulu_fork_epoch = Some(Epoch::new(FULU_FORK_EPOCH)); let spec = Arc::new(spec); env.eth2_config.spec = spec.clone(); diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs index 59efc09baa..a95c15c231 100644 --- a/testing/simulator/src/local_network.rs +++ b/testing/simulator/src/local_network.rs @@ -88,6 +88,11 @@ fn default_mock_execution_config( + spec.seconds_per_slot * E::slots_per_epoch() * electra_fork_epoch.as_u64(), ) } + if let Some(fulu_fork_epoch) = spec.fulu_fork_epoch { + mock_execution_config.osaka_time = Some( + genesis_time + spec.seconds_per_slot * E::slots_per_epoch() * fulu_fork_epoch.as_u64(), + ) + } mock_execution_config } diff --git a/validator_client/beacon_node_fallback/src/lib.rs b/validator_client/beacon_node_fallback/src/lib.rs index 95a221f189..beae176193 100644 --- a/validator_client/beacon_node_fallback/src/lib.rs +++ b/validator_client/beacon_node_fallback/src/lib.rs @@ -361,6 +361,14 @@ impl CandidateBeaconNode { "endpoint_electra_fork_epoch" => ?beacon_node_spec.electra_fork_epoch, "hint" => UPDATE_REQUIRED_LOG_HINT, ); + } else if beacon_node_spec.fulu_fork_epoch != spec.fulu_fork_epoch { + warn!( + log, + "Beacon node has mismatched Fulu fork epoch"; + "endpoint" => %self.beacon_node, + "endpoint_fulu_fork_epoch" => ?beacon_node_spec.fulu_fork_epoch, + "hint" => UPDATE_REQUIRED_LOG_HINT, + ); } Ok(()) diff 
--git a/validator_client/http_api/src/test_utils.rs b/validator_client/http_api/src/test_utils.rs index 390095eec7..0531626846 100644 --- a/validator_client/http_api/src/test_utils.rs +++ b/validator_client/http_api/src/test_utils.rs @@ -251,9 +251,9 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self .client - .get_lighthouse_spec::() + .get_lighthouse_spec::() .await - .map(|res| ConfigAndPreset::Electra(res.data)) + .map(|res| ConfigAndPreset::Fulu(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&E::default_spec(), None); diff --git a/validator_client/http_api/src/tests.rs b/validator_client/http_api/src/tests.rs index 7ea3d7ebaa..4e9acc4237 100644 --- a/validator_client/http_api/src/tests.rs +++ b/validator_client/http_api/src/tests.rs @@ -214,9 +214,9 @@ impl ApiTester { pub async fn test_get_lighthouse_spec(self) -> Self { let result = self .client - .get_lighthouse_spec::() + .get_lighthouse_spec::() .await - .map(|res| ConfigAndPreset::Electra(res.data)) + .map(|res| ConfigAndPreset::Fulu(res.data)) .unwrap(); let expected = ConfigAndPreset::from_chain_spec::(&E::default_spec(), None); diff --git a/validator_client/signing_method/src/web3signer.rs b/validator_client/signing_method/src/web3signer.rs index 86e7015ad3..d286449d20 100644 --- a/validator_client/signing_method/src/web3signer.rs +++ b/validator_client/signing_method/src/web3signer.rs @@ -29,6 +29,7 @@ pub enum ForkName { Capella, Deneb, Electra, + Fulu, } #[derive(Debug, PartialEq, Serialize)] @@ -107,6 +108,11 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> Web3SignerObject<'a, E, Pa block: None, block_header: Some(block.block_header()), }), + BeaconBlock::Fulu(_) => Ok(Web3SignerObject::BeaconBlock { + version: ForkName::Fulu, + block: None, + block_header: Some(block.block_header()), + }), } } From 05727290fbef27df72e854de0ca9280ee6347101 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 10 Jan 2025 12:04:58 +0530 
Subject: [PATCH 10/15] Make max_blobs_per_block a config parameter (#6329) * First pass * Add restrictions to RuntimeVariableList api * Use empty_uninitialized and fix warnings * Fix some todos * Merge branch 'unstable' into max-blobs-preset * Fix take impl on RuntimeFixedList * cleanup * Fix test compilations * Fix some more tests * Fix test from unstable * Merge branch 'unstable' into max-blobs-preset * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset * Remove footgun function * Minor simplifications * Move from preset to config * Fix typo * Revert "Remove footgun function" This reverts commit de01f923c7452355c87f50c0e8031ca94fa00d36. * Try fixing tests * Thread through ChainSpec * Fix release tests * Move RuntimeFixedVector into module and rename * Add test * Remove empty RuntimeVarList awefullness * Fix tests * Simplify BlobSidecarListFromRoot * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset * Bump quota to account for new target (6) * Remove clone * Fix issue from review * Try to remove ugliness * Merge branch 'unstable' into max-blobs-preset * Fix max value * Fix doctest * Fix formatting * Fix max check * Delete hardcoded max_blobs_per_block in RPC limits * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset --- .cargo/config.toml | 1 + beacon_node/beacon_chain/src/beacon_chain.rs | 16 +- .../beacon_chain/src/blob_verification.rs | 2 +- .../src/block_verification_types.rs | 20 +-- .../src/data_availability_checker.rs | 14 +- .../overflow_lru_cache.rs | 147 ++++++++++-------- beacon_node/beacon_chain/src/fetch_blobs.rs | 10 +- beacon_node/beacon_chain/src/kzg_utils.rs | 17 +- .../src/observed_data_sidecars.rs | 20 +-- beacon_node/beacon_chain/src/test_utils.rs | 31 ++-- .../tests/attestation_production.rs | 9 +- .../beacon_chain/tests/block_verification.rs | 18 ++- beacon_node/beacon_chain/tests/events.rs | 2 +- beacon_node/beacon_chain/tests/store_tests.rs | 29 ++-- beacon_node/client/src/builder.rs | 
4 +- .../execution_layer/src/engine_api/http.rs | 3 +- .../test_utils/execution_block_generator.rs | 11 +- .../src/test_utils/mock_execution_layer.rs | 9 +- .../execution_layer/src/test_utils/mod.rs | 9 +- beacon_node/http_api/src/block_id.rs | 25 +-- .../tests/broadcast_validation_tests.rs | 3 +- .../lighthouse_network/src/rpc/codec.rs | 22 ++- .../lighthouse_network/src/rpc/config.rs | 4 +- .../lighthouse_network/src/rpc/handler.rs | 6 +- .../lighthouse_network/src/rpc/methods.rs | 6 +- beacon_node/lighthouse_network/src/rpc/mod.rs | 5 +- .../lighthouse_network/src/rpc/protocol.rs | 36 ++--- .../src/rpc/rate_limiter.rs | 24 ++- .../src/rpc/self_limiter.rs | 20 ++- .../network_beacon_processor/rpc_methods.rs | 8 - .../src/network_beacon_processor/tests.rs | 11 +- .../src/sync/block_sidecar_coupling.rs | 72 ++++++--- beacon_node/network/src/sync/manager.rs | 2 + .../network/src/sync/network_context.rs | 38 ++++- .../src/sync/network_context/requests.rs | 1 + beacon_node/network/src/sync/tests/lookups.rs | 16 +- beacon_node/network/src/sync/tests/mod.rs | 3 +- .../store/src/blob_sidecar_list_from_root.rs | 42 +++++ beacon_node/store/src/hot_cold_store.rs | 38 +++-- .../store/src/impls/execution_payload.rs | 3 +- beacon_node/store/src/lib.rs | 2 + .../chiado/config.yaml | 2 + .../gnosis/config.yaml | 2 + .../holesky/config.yaml | 2 + .../mainnet/config.yaml | 2 + .../sepolia/config.yaml | 2 + .../src/per_block_processing.rs | 6 +- consensus/types/presets/gnosis/deneb.yaml | 2 - consensus/types/presets/mainnet/deneb.yaml | 2 - consensus/types/presets/minimal/deneb.yaml | 2 - consensus/types/src/beacon_block_body.rs | 2 - consensus/types/src/blob_sidecar.rs | 33 ++-- consensus/types/src/chain_spec.rs | 26 ++++ consensus/types/src/data_column_sidecar.rs | 17 +- consensus/types/src/eth_spec.rs | 12 +- consensus/types/src/lib.rs | 2 + consensus/types/src/preset.rs | 3 - consensus/types/src/runtime_fixed_vector.rs | 81 ++++++++++ consensus/types/src/runtime_var_list.rs | 
19 ++- lcli/src/mock_el.rs | 5 +- testing/node_test_rig/src/lib.rs | 9 +- 61 files changed, 655 insertions(+), 335 deletions(-) create mode 100644 beacon_node/store/src/blob_sidecar_list_from_root.rs create mode 100644 consensus/types/src/runtime_fixed_vector.rs diff --git a/.cargo/config.toml b/.cargo/config.toml index a408305c4d..dac0163003 100644 --- a/.cargo/config.toml +++ b/.cargo/config.toml @@ -1,3 +1,4 @@ [env] # Set the number of arenas to 16 when using jemalloc. JEMALLOC_SYS_WITH_MALLOC_CONF = "abort_conf:true,narenas:16" + diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index d84cd9615a..81783267ba 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -117,7 +117,8 @@ use std::sync::Arc; use std::time::Duration; use store::iter::{BlockRootsIterator, ParentRootBlockIterator, StateRootsIterator}; use store::{ - DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, KeyValueStoreOp, StoreItem, StoreOp, + BlobSidecarListFromRoot, DatabaseBlock, Error as DBError, HotColdDB, KeyValueStore, + KeyValueStoreOp, StoreItem, StoreOp, }; use task_executor::{ShutdownReason, TaskExecutor}; use tokio::sync::mpsc::Receiver; @@ -1147,9 +1148,10 @@ impl BeaconChain { pub fn get_blobs_checking_early_attester_cache( &self, block_root: &Hash256, - ) -> Result, Error> { + ) -> Result, Error> { self.early_attester_cache .get_blobs(*block_root) + .map(Into::into) .map_or_else(|| self.get_blobs(block_root), Ok) } @@ -1240,11 +1242,11 @@ impl BeaconChain { /// /// ## Errors /// May return a database error. - pub fn get_blobs(&self, block_root: &Hash256) -> Result, Error> { - match self.store.get_blobs(block_root)? 
{ - Some(blobs) => Ok(blobs), - None => Ok(BlobSidecarList::default()), - } + pub fn get_blobs( + &self, + block_root: &Hash256, + ) -> Result, Error> { + self.store.get_blobs(block_root).map_err(Error::from) } /// Returns the data columns at the given root, if any. diff --git a/beacon_node/beacon_chain/src/blob_verification.rs b/beacon_node/beacon_chain/src/blob_verification.rs index 6c87deb826..786b627bb7 100644 --- a/beacon_node/beacon_chain/src/blob_verification.rs +++ b/beacon_node/beacon_chain/src/blob_verification.rs @@ -400,7 +400,7 @@ pub fn validate_blob_sidecar_for_gossip= T::EthSpec::max_blobs_per_block() as u64 { + if blob_index >= chain.spec.max_blobs_per_block(blob_epoch) { return Err(GossipBlobError::InvalidSubnet { expected: subnet, received: blob_index, diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 420c83081c..0bf3007e9b 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -4,11 +4,10 @@ use crate::data_column_verification::{CustodyDataColumn, CustodyDataColumnList}; use crate::eth1_finalization_cache::Eth1FinalizationData; use crate::{get_block_root, PayloadVerificationOutcome}; use derivative::Derivative; -use ssz_types::VariableList; use state_processing::ConsensusContext; use std::fmt::{Debug, Formatter}; use std::sync::Arc; -use types::blob_sidecar::{BlobIdentifier, FixedBlobSidecarList}; +use types::blob_sidecar::BlobIdentifier; use types::{ BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, ChainSpec, Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, @@ -176,23 +175,6 @@ impl RpcBlock { }) } - pub fn new_from_fixed( - block_root: Hash256, - block: Arc>, - blobs: FixedBlobSidecarList, - ) -> Result { - let filtered = blobs - .into_iter() - .filter_map(|b| b.clone()) - .collect::>(); - let blobs = if 
filtered.is_empty() { - None - } else { - Some(VariableList::from(filtered)) - }; - Self::new(Some(block_root), block, blobs) - } - #[allow(clippy::type_complexity)] pub fn deconstruct( self, diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index f6002ea0ac..4c5152239c 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -215,9 +215,12 @@ impl DataAvailabilityChecker { // Note: currently not reporting which specific blob is invalid because we fetch all blobs // from the same peer for both lookup and range sync. - let verified_blobs = - KzgVerifiedBlobList::new(blobs.iter().flatten().cloned(), &self.kzg, seen_timestamp) - .map_err(AvailabilityCheckError::InvalidBlobs)?; + let verified_blobs = KzgVerifiedBlobList::new( + blobs.into_vec().into_iter().flatten(), + &self.kzg, + seen_timestamp, + ) + .map_err(AvailabilityCheckError::InvalidBlobs)?; self.availability_cache .put_kzg_verified_blobs(block_root, verified_blobs, &self.log) @@ -400,14 +403,13 @@ impl DataAvailabilityChecker { blocks: Vec>, ) -> Result>, AvailabilityCheckError> { let mut results = Vec::with_capacity(blocks.len()); - let all_blobs: BlobSidecarList = blocks + let all_blobs = blocks .iter() .filter(|block| self.blobs_required_for_block(block.as_block())) // this clone is cheap as it's cloning an Arc .filter_map(|block| block.blobs().cloned()) .flatten() - .collect::>() - .into(); + .collect::>(); // verify kzg for all blobs at once if !all_blobs.is_empty() { diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 5ce023038d..44148922f4 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -10,13 
+10,12 @@ use crate::BeaconChainTypes; use lru::LruCache; use parking_lot::RwLock; use slog::{debug, Logger}; -use ssz_types::FixedVector; use std::num::NonZeroUsize; use std::sync::Arc; use types::blob_sidecar::BlobIdentifier; use types::{ BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, - Hash256, SignedBeaconBlock, + Hash256, RuntimeFixedVector, RuntimeVariableList, SignedBeaconBlock, }; /// This represents the components of a partially available block @@ -28,7 +27,7 @@ use types::{ #[derive(Clone)] pub struct PendingComponents { pub block_root: Hash256, - pub verified_blobs: FixedVector>, E::MaxBlobsPerBlock>, + pub verified_blobs: RuntimeFixedVector>>, pub verified_data_columns: Vec>, pub executed_block: Option>, pub reconstruction_started: bool, @@ -41,9 +40,7 @@ impl PendingComponents { } /// Returns an immutable reference to the fixed vector of cached blobs. - pub fn get_cached_blobs( - &self, - ) -> &FixedVector>, E::MaxBlobsPerBlock> { + pub fn get_cached_blobs(&self) -> &RuntimeFixedVector>> { &self.verified_blobs } @@ -64,9 +61,7 @@ impl PendingComponents { } /// Returns a mutable reference to the fixed vector of cached blobs. - pub fn get_cached_blobs_mut( - &mut self, - ) -> &mut FixedVector>, E::MaxBlobsPerBlock> { + pub fn get_cached_blobs_mut(&mut self) -> &mut RuntimeFixedVector>> { &mut self.verified_blobs } @@ -138,10 +133,7 @@ impl PendingComponents { /// Blobs are only inserted if: /// 1. The blob entry at the index is empty and no block exists. /// 2. The block exists and its commitment matches the blob's commitment. 
- pub fn merge_blobs( - &mut self, - blobs: FixedVector>, E::MaxBlobsPerBlock>, - ) { + pub fn merge_blobs(&mut self, blobs: RuntimeFixedVector>>) { for (index, blob) in blobs.iter().cloned().enumerate() { let Some(blob) = blob else { continue }; self.merge_single_blob(index, blob); @@ -185,7 +177,7 @@ impl PendingComponents { /// Blobs that don't match the new block's commitments are evicted. pub fn merge_block(&mut self, block: DietAvailabilityPendingExecutedBlock) { self.insert_block(block); - let reinsert = std::mem::take(self.get_cached_blobs_mut()); + let reinsert = self.get_cached_blobs_mut().take(); self.merge_blobs(reinsert); } @@ -237,10 +229,10 @@ impl PendingComponents { } /// Returns an empty `PendingComponents` object with the given block root. - pub fn empty(block_root: Hash256) -> Self { + pub fn empty(block_root: Hash256, max_len: usize) -> Self { Self { block_root, - verified_blobs: FixedVector::default(), + verified_blobs: RuntimeFixedVector::new(vec![None; max_len]), verified_data_columns: vec![], executed_block: None, reconstruction_started: false, @@ -299,7 +291,11 @@ impl PendingComponents { else { return Err(AvailabilityCheckError::Unexpected); }; - (Some(verified_blobs), None) + let max_len = spec.max_blobs_per_block(diet_executed_block.as_block().epoch()) as usize; + ( + Some(RuntimeVariableList::new(verified_blobs, max_len)?), + None, + ) }; let executed_block = recover(diet_executed_block)?; @@ -341,10 +337,7 @@ impl PendingComponents { } if let Some(kzg_verified_data_column) = self.verified_data_columns.first() { - let epoch = kzg_verified_data_column - .as_data_column() - .slot() - .epoch(E::slots_per_epoch()); + let epoch = kzg_verified_data_column.as_data_column().epoch(); return Some(epoch); } @@ -457,7 +450,18 @@ impl DataAvailabilityCheckerInner { kzg_verified_blobs: I, log: &Logger, ) -> Result, AvailabilityCheckError> { - let mut fixed_blobs = FixedVector::default(); + let mut kzg_verified_blobs = 
kzg_verified_blobs.into_iter().peekable(); + + let Some(epoch) = kzg_verified_blobs + .peek() + .map(|verified_blob| verified_blob.as_blob().epoch()) + else { + // Verified blobs list should be non-empty. + return Err(AvailabilityCheckError::Unexpected); + }; + + let mut fixed_blobs = + RuntimeFixedVector::new(vec![None; self.spec.max_blobs_per_block(epoch) as usize]); for blob in kzg_verified_blobs { if let Some(blob_opt) = fixed_blobs.get_mut(blob.blob_index() as usize) { @@ -471,7 +475,9 @@ impl DataAvailabilityCheckerInner { let mut pending_components = write_lock .pop_entry(&block_root) .map(|(_, v)| v) - .unwrap_or_else(|| PendingComponents::empty(block_root)); + .unwrap_or_else(|| { + PendingComponents::empty(block_root, self.spec.max_blobs_per_block(epoch) as usize) + }); // Merge in the blobs. pending_components.merge_blobs(fixed_blobs); @@ -498,13 +504,24 @@ impl DataAvailabilityCheckerInner { kzg_verified_data_columns: I, log: &Logger, ) -> Result, AvailabilityCheckError> { + let mut kzg_verified_data_columns = kzg_verified_data_columns.into_iter().peekable(); + let Some(epoch) = kzg_verified_data_columns + .peek() + .map(|verified_blob| verified_blob.as_data_column().epoch()) + else { + // Verified data_columns list should be non-empty. + return Err(AvailabilityCheckError::Unexpected); + }; + let mut write_lock = self.critical.write(); // Grab existing entry or create a new entry. let mut pending_components = write_lock .pop_entry(&block_root) .map(|(_, v)| v) - .unwrap_or_else(|| PendingComponents::empty(block_root)); + .unwrap_or_else(|| { + PendingComponents::empty(block_root, self.spec.max_blobs_per_block(epoch) as usize) + }); // Merge in the data columns. 
pending_components.merge_data_columns(kzg_verified_data_columns)?; @@ -581,6 +598,7 @@ impl DataAvailabilityCheckerInner { log: &Logger, ) -> Result, AvailabilityCheckError> { let mut write_lock = self.critical.write(); + let epoch = executed_block.as_block().epoch(); let block_root = executed_block.import_data.block_root; // register the block to get the diet block @@ -592,7 +610,9 @@ impl DataAvailabilityCheckerInner { let mut pending_components = write_lock .pop_entry(&block_root) .map(|(_, v)| v) - .unwrap_or_else(|| PendingComponents::empty(block_root)); + .unwrap_or_else(|| { + PendingComponents::empty(block_root, self.spec.max_blobs_per_block(epoch) as usize) + }); // Merge in the block. pending_components.merge_block(diet_executed_block); @@ -812,7 +832,8 @@ mod test { info!(log, "done printing kzg commitments"); let gossip_verified_blobs = if let Some((kzg_proofs, blobs)) = maybe_blobs { - let sidecars = BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap(); + let sidecars = + BlobSidecar::build_sidecars(blobs, &block, kzg_proofs, &chain.spec).unwrap(); Vec::from(sidecars) .into_iter() .map(|sidecar| { @@ -945,6 +966,8 @@ mod test { assert_eq!(cache.critical.read().len(), 1); } } + // remove the blob to simulate successful import + cache.remove_pending_components(root); assert!( cache.critical.read().is_empty(), "cache should be empty now that all components available" @@ -1125,7 +1148,7 @@ mod pending_components_tests { use super::*; use crate::block_verification_types::BlockImportData; use crate::eth1_finalization_cache::Eth1FinalizationData; - use crate::test_utils::{generate_rand_block_and_blobs, NumBlobs}; + use crate::test_utils::{generate_rand_block_and_blobs, test_spec, NumBlobs}; use crate::PayloadVerificationOutcome; use fork_choice::PayloadVerificationStatus; use kzg::KzgCommitment; @@ -1141,15 +1164,19 @@ mod pending_components_tests { type Setup = ( SignedBeaconBlock, - FixedVector>>, ::MaxBlobsPerBlock>, - FixedVector>>, 
::MaxBlobsPerBlock>, + RuntimeFixedVector>>>, + RuntimeFixedVector>>>, + usize, ); pub fn pre_setup() -> Setup { let mut rng = StdRng::seed_from_u64(0xDEADBEEF0BAD5EEDu64); + let spec = test_spec::(); let (block, blobs_vec) = - generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng); - let mut blobs: FixedVector<_, ::MaxBlobsPerBlock> = FixedVector::default(); + generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Random, &mut rng, &spec); + let max_len = spec.max_blobs_per_block(block.epoch()) as usize; + let mut blobs: RuntimeFixedVector>>> = + RuntimeFixedVector::default(max_len); for blob in blobs_vec { if let Some(b) = blobs.get_mut(blob.index as usize) { @@ -1157,10 +1184,8 @@ mod pending_components_tests { } } - let mut invalid_blobs: FixedVector< - Option>>, - ::MaxBlobsPerBlock, - > = FixedVector::default(); + let mut invalid_blobs: RuntimeFixedVector>>> = + RuntimeFixedVector::default(max_len); for (index, blob) in blobs.iter().enumerate() { if let Some(invalid_blob) = blob { let mut blob_copy = invalid_blob.as_ref().clone(); @@ -1169,21 +1194,21 @@ mod pending_components_tests { } } - (block, blobs, invalid_blobs) + (block, blobs, invalid_blobs, max_len) } type PendingComponentsSetup = ( DietAvailabilityPendingExecutedBlock, - FixedVector>, ::MaxBlobsPerBlock>, - FixedVector>, ::MaxBlobsPerBlock>, + RuntimeFixedVector>>, + RuntimeFixedVector>>, ); pub fn setup_pending_components( block: SignedBeaconBlock, - valid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, - invalid_blobs: FixedVector>>, ::MaxBlobsPerBlock>, + valid_blobs: RuntimeFixedVector>>>, + invalid_blobs: RuntimeFixedVector>>>, ) -> PendingComponentsSetup { - let blobs = FixedVector::from( + let blobs = RuntimeFixedVector::new( valid_blobs .iter() .map(|blob_opt| { @@ -1193,7 +1218,7 @@ mod pending_components_tests { }) .collect::>(), ); - let invalid_blobs = FixedVector::from( + let invalid_blobs = RuntimeFixedVector::new( invalid_blobs .iter() .map(|blob_opt| { @@ 
-1225,10 +1250,10 @@ mod pending_components_tests { (block.into(), blobs, invalid_blobs) } - pub fn assert_cache_consistent(cache: PendingComponents) { + pub fn assert_cache_consistent(cache: PendingComponents, max_len: usize) { if let Some(cached_block) = cache.get_cached_block() { let cached_block_commitments = cached_block.get_commitments(); - for index in 0..E::max_blobs_per_block() { + for index in 0..max_len { let block_commitment = cached_block_commitments.get(index).copied(); let blob_commitment_opt = cache.get_cached_blobs().get(index).unwrap(); let blob_commitment = blob_commitment_opt.as_ref().map(|b| *b.get_commitment()); @@ -1247,40 +1272,40 @@ mod pending_components_tests { #[test] fn valid_block_invalid_blobs_valid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_block(block_commitments); cache.merge_blobs(random_blobs); cache.merge_blobs(blobs); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } #[test] fn invalid_blobs_block_valid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_blobs(random_blobs); cache.merge_block(block_commitments); cache.merge_blobs(blobs); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } #[test] fn invalid_blobs_valid_blobs_block() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + 
let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_blobs(random_blobs); cache.merge_blobs(blobs); cache.merge_block(block_commitments); @@ -1290,46 +1315,46 @@ mod pending_components_tests { #[test] fn block_valid_blobs_invalid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_block(block_commitments); cache.merge_blobs(blobs); cache.merge_blobs(random_blobs); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } #[test] fn valid_blobs_block_invalid_blobs() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = >::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_blobs(blobs); cache.merge_block(block_commitments); cache.merge_blobs(random_blobs); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } #[test] fn valid_blobs_invalid_blobs_block() { - let (block_commitments, blobs, random_blobs) = pre_setup(); + let (block_commitments, blobs, random_blobs, max_len) = pre_setup(); let (block_commitments, blobs, random_blobs) = setup_pending_components(block_commitments, blobs, random_blobs); let block_root = Hash256::zero(); - let mut cache = 
>::empty(block_root); + let mut cache = >::empty(block_root, max_len); cache.merge_blobs(blobs); cache.merge_blobs(random_blobs); cache.merge_block(block_commitments); - assert_cache_consistent(cache); + assert_cache_consistent(cache, max_len); } } diff --git a/beacon_node/beacon_chain/src/fetch_blobs.rs b/beacon_node/beacon_chain/src/fetch_blobs.rs index f740b693fb..f1646072c9 100644 --- a/beacon_node/beacon_chain/src/fetch_blobs.rs +++ b/beacon_node/beacon_chain/src/fetch_blobs.rs @@ -21,8 +21,8 @@ use std::sync::Arc; use tokio::sync::mpsc::Receiver; use types::blob_sidecar::{BlobSidecarError, FixedBlobSidecarList}; use types::{ - BeaconStateError, BlobSidecar, DataColumnSidecar, DataColumnSidecarList, EthSpec, FullPayload, - Hash256, SignedBeaconBlock, SignedBeaconBlockHeader, + BeaconStateError, BlobSidecar, ChainSpec, DataColumnSidecar, DataColumnSidecarList, EthSpec, + FullPayload, Hash256, SignedBeaconBlock, SignedBeaconBlockHeader, }; pub enum BlobsOrDataColumns { @@ -112,6 +112,7 @@ pub async fn fetch_and_process_engine_blobs( response, signed_block_header, &kzg_commitments_proof, + &chain.spec, )?; let num_fetched_blobs = fixed_blob_sidecar_list @@ -275,8 +276,11 @@ fn build_blob_sidecars( response: Vec>>, signed_block_header: SignedBeaconBlockHeader, kzg_commitments_inclusion_proof: &FixedVector, + spec: &ChainSpec, ) -> Result, FetchEngineBlobError> { - let mut fixed_blob_sidecar_list = FixedBlobSidecarList::default(); + let epoch = block.epoch(); + let mut fixed_blob_sidecar_list = + FixedBlobSidecarList::default(spec.max_blobs_per_block(epoch) as usize); for (index, blob_and_proof) in response .into_iter() .enumerate() diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index bd47e82215..e32ee9c24b 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -194,9 +194,11 @@ fn build_data_column_sidecars( spec: &ChainSpec, ) -> Result, String> { let 
number_of_columns = spec.number_of_columns; - let mut columns = vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; - let mut column_kzg_proofs = - vec![Vec::with_capacity(E::max_blobs_per_block()); number_of_columns]; + let max_blobs_per_block = spec + .max_blobs_per_block(signed_block_header.message.slot.epoch(E::slots_per_epoch())) + as usize; + let mut columns = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns]; + let mut column_kzg_proofs = vec![Vec::with_capacity(max_blobs_per_block); number_of_columns]; for (blob_cells, blob_cell_proofs) in blob_cells_and_proofs_vec { // we iterate over each column, and we construct the column from "top to bottom", @@ -253,6 +255,7 @@ pub fn reconstruct_blobs( data_columns: &[Arc>], blob_indices_opt: Option>, signed_block: &SignedBlindedBeaconBlock, + spec: &ChainSpec, ) -> Result, String> { // The data columns are from the database, so we assume their correctness. let first_data_column = data_columns @@ -315,10 +318,11 @@ pub fn reconstruct_blobs( .map(Arc::new) .map_err(|e| format!("{e:?}")) }) - .collect::, _>>()? - .into(); + .collect::, _>>()?; + + let max_blobs = spec.max_blobs_per_block(signed_block.epoch()) as usize; - Ok(blob_sidecars) + BlobSidecarList::new(blob_sidecars, max_blobs).map_err(|e| format!("{e:?}")) } /// Reconstruct all data columns from a subset of data column sidecars (requires at least 50%). 
@@ -478,6 +482,7 @@ mod test { &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], Some(blob_indices.clone()), &signed_blinded_block, + spec, ) .unwrap(); diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs index a9f4664064..48989e07d3 100644 --- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -24,7 +24,7 @@ pub trait ObservableDataSidecar { fn slot(&self) -> Slot; fn block_proposer_index(&self) -> u64; fn index(&self) -> u64; - fn max_num_of_items(spec: &ChainSpec) -> usize; + fn max_num_of_items(spec: &ChainSpec, slot: Slot) -> usize; } impl ObservableDataSidecar for BlobSidecar { @@ -40,8 +40,8 @@ impl ObservableDataSidecar for BlobSidecar { self.index } - fn max_num_of_items(_spec: &ChainSpec) -> usize { - E::max_blobs_per_block() + fn max_num_of_items(spec: &ChainSpec, slot: Slot) -> usize { + spec.max_blobs_per_block(slot.epoch(E::slots_per_epoch())) as usize } } @@ -58,7 +58,7 @@ impl ObservableDataSidecar for DataColumnSidecar { self.index } - fn max_num_of_items(spec: &ChainSpec) -> usize { + fn max_num_of_items(spec: &ChainSpec, _slot: Slot) -> usize { spec.number_of_columns } } @@ -103,7 +103,9 @@ impl ObservedDataSidecars { slot: data_sidecar.slot(), proposer: data_sidecar.block_proposer_index(), }) - .or_insert_with(|| HashSet::with_capacity(T::max_num_of_items(&self.spec))); + .or_insert_with(|| { + HashSet::with_capacity(T::max_num_of_items(&self.spec, data_sidecar.slot())) + }); let did_not_exist = data_indices.insert(data_sidecar.index()); Ok(!did_not_exist) @@ -123,7 +125,7 @@ impl ObservedDataSidecars { } fn sanitize_data_sidecar(&self, data_sidecar: &T) -> Result<(), Error> { - if data_sidecar.index() >= T::max_num_of_items(&self.spec) as u64 { + if data_sidecar.index() >= T::max_num_of_items(&self.spec, data_sidecar.slot()) as u64 { return 
Err(Error::InvalidDataIndex(data_sidecar.index())); } let finalized_slot = self.finalized_slot; @@ -179,7 +181,7 @@ mod tests { use crate::test_utils::test_spec; use bls::Hash256; use std::sync::Arc; - use types::MainnetEthSpec; + use types::{Epoch, MainnetEthSpec}; type E = MainnetEthSpec; @@ -333,7 +335,7 @@ mod tests { #[test] fn simple_observations() { let spec = Arc::new(test_spec::()); - let mut cache = ObservedDataSidecars::>::new(spec); + let mut cache = ObservedDataSidecars::>::new(spec.clone()); // Slot 0, index 0 let proposer_index_a = 420; @@ -489,7 +491,7 @@ mod tests { ); // Try adding an out of bounds index - let invalid_index = E::max_blobs_per_block() as u64; + let invalid_index = spec.max_blobs_per_block(Epoch::new(0)); let sidecar_d = get_blob_sidecar(0, proposer_index_a, invalid_index); assert_eq!( cache.observe_sidecar(&sidecar_d), diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index d37398e4e0..fd3cc49626 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -514,7 +514,7 @@ where pub fn mock_execution_layer_with_config(mut self) -> Self { let mock = mock_execution_layer_from_parts::( - self.spec.as_ref().expect("cannot build without spec"), + self.spec.clone().expect("cannot build without spec"), self.runtime.task_executor.clone(), ); self.execution_layer = Some(mock.el.clone()); @@ -614,7 +614,7 @@ where } pub fn mock_execution_layer_from_parts( - spec: &ChainSpec, + spec: Arc, task_executor: TaskExecutor, ) -> MockExecutionLayer { let shanghai_time = spec.capella_fork_epoch.map(|epoch| { @@ -630,7 +630,7 @@ pub fn mock_execution_layer_from_parts( HARNESS_GENESIS_TIME + spec.seconds_per_slot * E::slots_per_epoch() * epoch.as_u64() }); - let kzg = get_kzg(spec); + let kzg = get_kzg(&spec); MockExecutionLayer::new( task_executor, @@ -640,7 +640,7 @@ pub fn mock_execution_layer_from_parts( prague_time, osaka_time, 
Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), - spec.clone(), + spec, Some(kzg), ) } @@ -749,15 +749,15 @@ where pub fn get_head_block(&self) -> RpcBlock { let block = self.chain.head_beacon_block(); let block_root = block.canonical_root(); - let blobs = self.chain.get_blobs(&block_root).unwrap(); - RpcBlock::new(Some(block_root), block, Some(blobs)).unwrap() + let blobs = self.chain.get_blobs(&block_root).unwrap().blobs(); + RpcBlock::new(Some(block_root), block, blobs).unwrap() } pub fn get_full_block(&self, block_root: &Hash256) -> RpcBlock { let block = self.chain.get_blinded_block(block_root).unwrap().unwrap(); let full_block = self.chain.store.make_full_block(block_root, block).unwrap(); - let blobs = self.chain.get_blobs(block_root).unwrap(); - RpcBlock::new(Some(*block_root), Arc::new(full_block), Some(blobs)).unwrap() + let blobs = self.chain.get_blobs(block_root).unwrap().blobs(); + RpcBlock::new(Some(*block_root), Arc::new(full_block), blobs).unwrap() } pub fn get_all_validators(&self) -> Vec { @@ -2020,7 +2020,7 @@ where let (block, blob_items) = block_contents; let sidecars = blob_items - .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs)) + .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec)) .transpose() .unwrap(); let block_hash: SignedBeaconBlockHash = self @@ -2046,7 +2046,7 @@ where let (block, blob_items) = block_contents; let sidecars = blob_items - .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs)) + .map(|(proofs, blobs)| BlobSidecar::build_sidecars(blobs, &block, proofs, &self.spec)) .transpose() .unwrap(); let block_root = block.canonical_root(); @@ -2817,11 +2817,12 @@ pub fn generate_rand_block_and_blobs( fork_name: ForkName, num_blobs: NumBlobs, rng: &mut impl Rng, + spec: &ChainSpec, ) -> (SignedBeaconBlock>, Vec>) { let inner = map_fork_name!(fork_name, BeaconBlock, <_>::random_for_test(rng)); let mut block = SignedBeaconBlock::from_block(inner, 
types::Signature::random_for_test(rng)); - + let max_blobs = spec.max_blobs_per_block(block.epoch()) as usize; let mut blob_sidecars = vec![]; let bundle = match block { @@ -2831,7 +2832,7 @@ pub fn generate_rand_block_and_blobs( // Get either zero blobs or a random number of blobs between 1 and Max Blobs. let payload: &mut FullPayloadDeneb = &mut message.body.execution_payload; let num_blobs = match num_blobs { - NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()), + NumBlobs::Random => rng.gen_range(1..=max_blobs), NumBlobs::Number(n) => n, NumBlobs::None => 0, }; @@ -2851,7 +2852,7 @@ pub fn generate_rand_block_and_blobs( // Get either zero blobs or a random number of blobs between 1 and Max Blobs. let payload: &mut FullPayloadElectra = &mut message.body.execution_payload; let num_blobs = match num_blobs { - NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()), + NumBlobs::Random => rng.gen_range(1..=max_blobs), NumBlobs::Number(n) => n, NumBlobs::None => 0, }; @@ -2870,7 +2871,7 @@ pub fn generate_rand_block_and_blobs( // Get either zero blobs or a random number of blobs between 1 and Max Blobs. 
let payload: &mut FullPayloadFulu = &mut message.body.execution_payload; let num_blobs = match num_blobs { - NumBlobs::Random => rng.gen_range(1..=E::max_blobs_per_block()), + NumBlobs::Random => rng.gen_range(1..=max_blobs), NumBlobs::Number(n) => n, NumBlobs::None => 0, }; @@ -2924,7 +2925,7 @@ pub fn generate_rand_block_and_data_columns( DataColumnSidecarList, ) { let kzg = get_kzg(spec); - let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng); + let (block, blobs) = generate_rand_block_and_blobs(fork_name, num_blobs, rng, spec); let blob_refs = blobs.iter().map(|b| &b.blob).collect::>(); let data_columns = blobs_to_data_column_sidecars(&blob_refs, &block, &kzg, spec).unwrap(); diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 87fefe7114..6000115993 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -155,7 +155,7 @@ async fn produces_attestations() { .store .make_full_block(&block_root, blinded_block) .unwrap(); - let blobs = chain.get_blobs(&block_root).unwrap(); + let blobs = chain.get_blobs(&block_root).unwrap().blobs(); let epoch_boundary_slot = state .current_epoch() @@ -223,7 +223,7 @@ async fn produces_attestations() { assert_eq!(data.target.root, target_root, "bad target root"); let rpc_block = - RpcBlock::::new(None, Arc::new(block.clone()), Some(blobs.clone())) + RpcBlock::::new(None, Arc::new(block.clone()), blobs.clone()) .unwrap(); let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available( available_block, @@ -299,10 +299,11 @@ async fn early_attester_cache_old_request() { let head_blobs = harness .chain .get_blobs(&head.beacon_block_root) - .expect("should get blobs"); + .expect("should get blobs") + .blobs(); let rpc_block = - RpcBlock::::new(None, head.beacon_block.clone(), Some(head_blobs)).unwrap(); + RpcBlock::::new(None, 
head.beacon_block.clone(), head_blobs).unwrap(); let beacon_chain::data_availability_checker::MaybeAvailableBlock::Available(available_block) = harness .chain diff --git a/beacon_node/beacon_chain/tests/block_verification.rs b/beacon_node/beacon_chain/tests/block_verification.rs index 103734b224..b61f758cac 100644 --- a/beacon_node/beacon_chain/tests/block_verification.rs +++ b/beacon_node/beacon_chain/tests/block_verification.rs @@ -65,12 +65,13 @@ async fn get_chain_segment() -> (Vec>, Vec( signed_block: &SignedBeaconBlock, blobs: &mut BlobSidecarList, ) { - for old_blob_sidecar in blobs.iter_mut() { + for old_blob_sidecar in blobs.as_mut_slice() { let new_blob = Arc::new(BlobSidecar:: { index: old_blob_sidecar.index, blob: old_blob_sidecar.blob.clone(), @@ -1223,7 +1225,7 @@ async fn verify_block_for_gossip_slashing_detection() { let slasher = Arc::new( Slasher::open( SlasherConfig::new(slasher_dir.path().into()), - spec, + spec.clone(), test_logger(), ) .unwrap(), @@ -1247,7 +1249,7 @@ async fn verify_block_for_gossip_slashing_detection() { if let Some((kzg_proofs, blobs)) = blobs1 { let sidecars = - BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs).unwrap(); + BlobSidecar::build_sidecars(blobs, verified_block.block(), kzg_proofs, &spec).unwrap(); for sidecar in sidecars { let blob_index = sidecar.index; let verified_blob = harness diff --git a/beacon_node/beacon_chain/tests/events.rs b/beacon_node/beacon_chain/tests/events.rs index ab784d3be4..c9bd55e062 100644 --- a/beacon_node/beacon_chain/tests/events.rs +++ b/beacon_node/beacon_chain/tests/events.rs @@ -73,7 +73,7 @@ async fn blob_sidecar_event_on_process_rpc_blobs() { let blob_1 = Arc::new(blob_1); let blob_2 = Arc::new(blob_2); - let blobs = FixedBlobSidecarList::from(vec![Some(blob_1.clone()), Some(blob_2.clone())]); + let blobs = FixedBlobSidecarList::new(vec![Some(blob_1.clone()), Some(blob_2.clone())]); let expected_sse_blobs = vec![ 
SseBlobSidecar::from_blob_sidecar(blob_1.as_ref()), SseBlobSidecar::from_blob_sidecar(blob_2.as_ref()), diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index ed97b8d634..60d46e8269 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2317,7 +2317,12 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .get_full_block(&wss_block_root) .unwrap() .unwrap(); - let wss_blobs_opt = harness.chain.store.get_blobs(&wss_block_root).unwrap(); + let wss_blobs_opt = harness + .chain + .store + .get_blobs(&wss_block_root) + .unwrap() + .blobs(); let wss_state = full_store .get_state(&wss_state_root, Some(checkpoint_slot)) .unwrap() @@ -2342,8 +2347,10 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { let kzg = get_kzg(&spec); - let mock = - mock_execution_layer_from_parts(&harness.spec, harness.runtime.task_executor.clone()); + let mock = mock_execution_layer_from_parts( + harness.spec.clone(), + harness.runtime.task_executor.clone(), + ); // Initialise a new beacon chain from the finalized checkpoint. // The slot clock must be set to a time ahead of the checkpoint state. 
@@ -2388,7 +2395,11 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .unwrap() .unwrap(); - let store_wss_blobs_opt = beacon_chain.store.get_blobs(&wss_block_root).unwrap(); + let store_wss_blobs_opt = beacon_chain + .store + .get_blobs(&wss_block_root) + .unwrap() + .blobs(); assert_eq!(store_wss_block, wss_block); assert_eq!(store_wss_blobs_opt, wss_blobs_opt); @@ -2407,7 +2418,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .unwrap() .unwrap(); - let blobs = harness.chain.get_blobs(&block_root).expect("blobs"); + let blobs = harness.chain.get_blobs(&block_root).expect("blobs").blobs(); let slot = full_block.slot(); let state_root = full_block.state_root(); @@ -2415,7 +2426,7 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { beacon_chain .process_block( full_block.canonical_root(), - RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), + RpcBlock::new(Some(block_root), Arc::new(full_block), blobs).unwrap(), NotifyExecutionLayer::Yes, BlockImportSource::Lookup, || Ok(()), @@ -2469,13 +2480,13 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { .await .expect("should get block") .expect("should get block"); - let blobs = harness.chain.get_blobs(&block_root).expect("blobs"); + let blobs = harness.chain.get_blobs(&block_root).expect("blobs").blobs(); if let MaybeAvailableBlock::Available(block) = harness .chain .data_availability_checker .verify_kzg_for_rpc_block( - RpcBlock::new(Some(block_root), Arc::new(full_block), Some(blobs)).unwrap(), + RpcBlock::new(Some(block_root), Arc::new(full_block), blobs).unwrap(), ) .expect("should verify kzg") { @@ -3351,7 +3362,7 @@ fn check_blob_existence( .unwrap() .map(Result::unwrap) { - if let Some(blobs) = harness.chain.store.get_blobs(&block_root).unwrap() { + if let Some(blobs) = harness.chain.store.get_blobs(&block_root).unwrap().blobs() { assert!(should_exist, "blobs at slot 
{slot} exist but should not"); blobs_seen += blobs.len(); } else { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 24c6615822..1cd9e89b96 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -36,7 +36,6 @@ use network::{NetworkConfig, NetworkSenders, NetworkService}; use slasher::Slasher; use slasher_service::SlasherService; use slog::{debug, info, warn, Logger}; -use ssz::Decode; use std::net::TcpListener; use std::path::{Path, PathBuf}; use std::sync::Arc; @@ -361,10 +360,11 @@ where let anchor_block = SignedBeaconBlock::from_ssz_bytes(&anchor_block_bytes, &spec) .map_err(|e| format!("Unable to parse weak subj block SSZ: {:?}", e))?; let anchor_blobs = if anchor_block.message().body().has_blobs() { + let max_blobs_len = spec.max_blobs_per_block(anchor_block.epoch()) as usize; let anchor_blobs_bytes = anchor_blobs_bytes .ok_or("Blobs for checkpoint must be provided using --checkpoint-blobs")?; Some( - BlobSidecarList::from_ssz_bytes(&anchor_blobs_bytes) + BlobSidecarList::from_ssz_bytes(&anchor_blobs_bytes, max_blobs_len) .map_err(|e| format!("Unable to parse weak subj blobs SSZ: {e:?}"))?, ) } else { diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 1fd9f81d46..daf2bf6ed4 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -1383,7 +1383,8 @@ mod test { impl Tester { pub fn new(with_auth: bool) -> Self { - let server = MockServer::unit_testing(); + let spec = Arc::new(MainnetEthSpec::default_spec()); + let server = MockServer::unit_testing(spec); let rpc_url = SensitiveUrl::parse(&server.url()).unwrap(); let echo_url = SensitiveUrl::parse(&format!("{}/echo", server.url())).unwrap(); diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 
2a39796707..9fa375b375 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -154,6 +154,7 @@ pub struct ExecutionBlockGenerator { pub blobs_bundles: HashMap>, pub kzg: Option>, rng: Arc>, + spec: Arc, } fn make_rng() -> Arc> { @@ -172,6 +173,7 @@ impl ExecutionBlockGenerator { cancun_time: Option, prague_time: Option, osaka_time: Option, + spec: Arc, kzg: Option>, ) -> Self { let mut gen = Self { @@ -192,6 +194,7 @@ impl ExecutionBlockGenerator { blobs_bundles: <_>::default(), kzg, rng: make_rng(), + spec, }; gen.insert_pow_block(0).unwrap(); @@ -697,7 +700,11 @@ impl ExecutionBlockGenerator { if execution_payload.fork_name().deneb_enabled() { // get random number between 0 and Max Blobs let mut rng = self.rng.lock(); - let num_blobs = rng.gen::() % (E::max_blobs_per_block() + 1); + let max_blobs = self + .spec + .max_blobs_per_block_by_fork(execution_payload.fork_name()) + as usize; + let num_blobs = rng.gen::() % (max_blobs + 1); let (bundle, transactions) = generate_blobs(num_blobs)?; for tx in Vec::from(transactions) { execution_payload @@ -906,6 +913,7 @@ mod test { const TERMINAL_DIFFICULTY: u64 = 10; const TERMINAL_BLOCK: u64 = 10; const DIFFICULTY_INCREMENT: u64 = 1; + let spec = Arc::new(MainnetEthSpec::default_spec()); let mut generator: ExecutionBlockGenerator = ExecutionBlockGenerator::new( Uint256::from(TERMINAL_DIFFICULTY), @@ -915,6 +923,7 @@ mod test { None, None, None, + spec, None, ); diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 9df8d9cc5c..f45bfda9ff 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -13,7 +13,7 @@ pub struct MockExecutionLayer { pub server: MockServer, pub el: ExecutionLayer, pub executor: TaskExecutor, - 
pub spec: ChainSpec, + pub spec: Arc, } impl MockExecutionLayer { @@ -30,7 +30,7 @@ impl MockExecutionLayer { None, None, Some(JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap()), - spec, + Arc::new(spec), None, ) } @@ -44,7 +44,7 @@ impl MockExecutionLayer { prague_time: Option, osaka_time: Option, jwt_key: Option, - spec: ChainSpec, + spec: Arc, kzg: Option>, ) -> Self { let handle = executor.handle().unwrap(); @@ -60,6 +60,7 @@ impl MockExecutionLayer { cancun_time, prague_time, osaka_time, + spec.clone(), kzg, ); @@ -323,7 +324,7 @@ impl MockExecutionLayer { pub async fn with_terminal_block(self, func: U) -> Self where - U: Fn(ChainSpec, ExecutionLayer, Option) -> V, + U: Fn(Arc, ExecutionLayer, Option) -> V, V: Future, { let terminal_block_number = self diff --git a/beacon_node/execution_layer/src/test_utils/mod.rs b/beacon_node/execution_layer/src/test_utils/mod.rs index 5934c069a2..75ff435886 100644 --- a/beacon_node/execution_layer/src/test_utils/mod.rs +++ b/beacon_node/execution_layer/src/test_utils/mod.rs @@ -21,7 +21,7 @@ use std::marker::PhantomData; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::sync::{Arc, LazyLock}; use tokio::{runtime, sync::oneshot}; -use types::{EthSpec, ExecutionBlockHash, Uint256}; +use types::{ChainSpec, EthSpec, ExecutionBlockHash, Uint256}; use warp::{http::StatusCode, Filter, Rejection}; use crate::EngineCapabilities; @@ -111,7 +111,7 @@ pub struct MockServer { } impl MockServer { - pub fn unit_testing() -> Self { + pub fn unit_testing(chain_spec: Arc) -> Self { Self::new( &runtime::Handle::current(), JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(), @@ -122,6 +122,7 @@ impl MockServer { None, // FIXME(deneb): should this be the default? None, // FIXME(electra): should this be the default? None, // FIXME(fulu): should this be the default? 
+ chain_spec, None, ) } @@ -129,6 +130,7 @@ impl MockServer { pub fn new_with_config( handle: &runtime::Handle, config: MockExecutionConfig, + spec: Arc, kzg: Option>, ) -> Self { let MockExecutionConfig { @@ -152,6 +154,7 @@ impl MockServer { cancun_time, prague_time, osaka_time, + spec, kzg, ); @@ -216,6 +219,7 @@ impl MockServer { cancun_time: Option, prague_time: Option, osaka_time: Option, + spec: Arc, kzg: Option>, ) -> Self { Self::new_with_config( @@ -231,6 +235,7 @@ impl MockServer { prague_time, osaka_time, }, + spec, kzg, ) } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index b9e4883318..0b00958f26 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -287,14 +287,16 @@ impl BlockId { })?; // Return the `BlobSidecarList` identified by `self`. + let max_blobs_per_block = chain.spec.max_blobs_per_block(block.epoch()) as usize; let blob_sidecar_list = if !blob_kzg_commitments.is_empty() { if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { Self::get_blobs_from_data_columns(chain, root, query.indices, &block)? } else { - Self::get_blobs(chain, root, query.indices)? + Self::get_blobs(chain, root, query.indices, max_blobs_per_block)? } } else { - BlobSidecarList::default() + BlobSidecarList::new(vec![], max_blobs_per_block) + .map_err(|e| warp_utils::reject::custom_server_error(format!("{:?}", e)))? }; Ok((block, blob_sidecar_list, execution_optimistic, finalized)) @@ -304,22 +306,25 @@ impl BlockId { chain: &BeaconChain, root: Hash256, indices: Option>, + max_blobs_per_block: usize, ) -> Result, Rejection> { let blob_sidecar_list = chain .store .get_blobs(&root) .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? 
+ .blobs() .ok_or_else(|| { warp_utils::reject::custom_not_found(format!("no blobs stored for block {root}")) })?; let blob_sidecar_list_filtered = match indices { Some(vec) => { - let list = blob_sidecar_list + let list: Vec<_> = blob_sidecar_list .into_iter() .filter(|blob_sidecar| vec.contains(&blob_sidecar.index)) .collect(); - BlobSidecarList::new(list) + + BlobSidecarList::new(list, max_blobs_per_block) .map_err(|e| warp_utils::reject::custom_server_error(format!("{:?}", e)))? } None => blob_sidecar_list, @@ -356,11 +361,13 @@ impl BlockId { ) .collect::, _>>()?; - reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block).map_err(|e| { - warp_utils::reject::custom_server_error(format!( - "Error reconstructing data columns: {e:?}" - )) - }) + reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block, &chain.spec).map_err( + |e| { + warp_utils::reject::custom_server_error(format!( + "Error reconstructing data columns: {e:?}" + )) + }, + ) } else { Err(warp_utils::reject::custom_server_error( format!("Insufficient data columns to reconstruct blobs: required {num_required_columns}, but only {num_found_column_keys} were found.") diff --git a/beacon_node/http_api/tests/broadcast_validation_tests.rs b/beacon_node/http_api/tests/broadcast_validation_tests.rs index 8e0a51a32a..db4ef00257 100644 --- a/beacon_node/http_api/tests/broadcast_validation_tests.rs +++ b/beacon_node/http_api/tests/broadcast_validation_tests.rs @@ -1460,7 +1460,8 @@ pub async fn block_seen_on_gossip_with_some_blobs() { let blobs = blobs.expect("should have some blobs"); assert!( blobs.0.len() >= 2, - "need at least 2 blobs for partial reveal" + "need at least 2 blobs for partial reveal, got: {}", + blobs.0.len() ); let partial_kzg_proofs = vec![*blobs.0.first().unwrap()]; diff --git a/beacon_node/lighthouse_network/src/rpc/codec.rs b/beacon_node/lighthouse_network/src/rpc/codec.rs index c3d20bbfb1..61b2699ac5 100644 --- a/beacon_node/lighthouse_network/src/rpc/codec.rs +++ 
b/beacon_node/lighthouse_network/src/rpc/codec.rs @@ -186,6 +186,7 @@ impl Decoder for SSZSnappyInboundCodec { handle_rpc_request( self.protocol.versioned_protocol, &decoded_buffer, + self.fork_context.current_fork(), &self.fork_context.spec, ) } @@ -555,6 +556,7 @@ fn handle_length( fn handle_rpc_request( versioned_protocol: SupportedProtocol, decoded_buffer: &[u8], + current_fork: ForkName, spec: &ChainSpec, ) -> Result>, RPCError> { match versioned_protocol { @@ -586,9 +588,23 @@ fn handle_rpc_request( )?, }), ))), - SupportedProtocol::BlobsByRangeV1 => Ok(Some(RequestType::BlobsByRange( - BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?, - ))), + SupportedProtocol::BlobsByRangeV1 => { + let req = BlobsByRangeRequest::from_ssz_bytes(decoded_buffer)?; + let max_requested_blobs = req + .count + .saturating_mul(spec.max_blobs_per_block_by_fork(current_fork)); + // TODO(pawan): change this to max_blobs_per_rpc_request in the alpha10 PR + if max_requested_blobs > spec.max_request_blob_sidecars { + return Err(RPCError::ErrorResponse( + RpcErrorResponse::InvalidRequest, + format!( + "requested exceeded limit. 
allowed: {}, requested: {}", + spec.max_request_blob_sidecars, max_requested_blobs + ), + )); + } + Ok(Some(RequestType::BlobsByRange(req))) + } SupportedProtocol::BlobsByRootV1 => { Ok(Some(RequestType::BlobsByRoot(BlobsByRootRequest { blob_ids: RuntimeVariableList::from_ssz_bytes( diff --git a/beacon_node/lighthouse_network/src/rpc/config.rs b/beacon_node/lighthouse_network/src/rpc/config.rs index 7b3a59eac7..75d49e9cb5 100644 --- a/beacon_node/lighthouse_network/src/rpc/config.rs +++ b/beacon_node/lighthouse_network/src/rpc/config.rs @@ -110,8 +110,8 @@ impl RateLimiterConfig { pub const DEFAULT_BLOCKS_BY_RANGE_QUOTA: Quota = Quota::n_every(128, 10); pub const DEFAULT_BLOCKS_BY_ROOT_QUOTA: Quota = Quota::n_every(128, 10); // `DEFAULT_BLOCKS_BY_RANGE_QUOTA` * (target + 1) to account for high usage - pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(512, 10); - pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(512, 10); + pub const DEFAULT_BLOBS_BY_RANGE_QUOTA: Quota = Quota::n_every(896, 10); + pub const DEFAULT_BLOBS_BY_ROOT_QUOTA: Quota = Quota::n_every(896, 10); // 320 blocks worth of columns for regular node, or 40 blocks for supernode. // Range sync load balances when requesting blocks, and each batch is 32 blocks. pub const DEFAULT_DATA_COLUMNS_BY_RANGE_QUOTA: Quota = Quota::n_every(5120, 10); diff --git a/beacon_node/lighthouse_network/src/rpc/handler.rs b/beacon_node/lighthouse_network/src/rpc/handler.rs index 0a0a6ca754..3a008df023 100644 --- a/beacon_node/lighthouse_network/src/rpc/handler.rs +++ b/beacon_node/lighthouse_network/src/rpc/handler.rs @@ -855,7 +855,8 @@ where } let (req, substream) = substream; - let max_responses = req.max_responses(); + let max_responses = + req.max_responses(self.fork_context.current_fork(), &self.fork_context.spec); // store requests that expect responses if max_responses > 0 { @@ -924,7 +925,8 @@ where } // add the stream to substreams if we expect a response, otherwise drop the stream. 
- let max_responses = request.max_responses(); + let max_responses = + request.max_responses(self.fork_context.current_fork(), &self.fork_context.spec); if max_responses > 0 { let max_remaining_chunks = if request.expect_exactly_one_response() { // Currently enforced only for multiple responses diff --git a/beacon_node/lighthouse_network/src/rpc/methods.rs b/beacon_node/lighthouse_network/src/rpc/methods.rs index bb8bfb0e20..500188beef 100644 --- a/beacon_node/lighthouse_network/src/rpc/methods.rs +++ b/beacon_node/lighthouse_network/src/rpc/methods.rs @@ -15,6 +15,7 @@ use strum::IntoStaticStr; use superstruct::superstruct; use types::blob_sidecar::BlobIdentifier; use types::light_client_update::MAX_REQUEST_LIGHT_CLIENT_UPDATES; +use types::ForkName; use types::{ blob_sidecar::BlobSidecar, ChainSpec, ColumnIndex, DataColumnIdentifier, DataColumnSidecar, Epoch, EthSpec, Hash256, LightClientBootstrap, LightClientFinalityUpdate, @@ -327,8 +328,9 @@ pub struct BlobsByRangeRequest { } impl BlobsByRangeRequest { - pub fn max_blobs_requested(&self) -> u64 { - self.count.saturating_mul(E::max_blobs_per_block() as u64) + pub fn max_blobs_requested(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { + let max_blobs_per_block = spec.max_blobs_per_block_by_fork(current_fork); + self.count.saturating_mul(max_blobs_per_block) } } diff --git a/beacon_node/lighthouse_network/src/rpc/mod.rs b/beacon_node/lighthouse_network/src/rpc/mod.rs index 7d091da766..03f1395b8b 100644 --- a/beacon_node/lighthouse_network/src/rpc/mod.rs +++ b/beacon_node/lighthouse_network/src/rpc/mod.rs @@ -181,12 +181,13 @@ impl RPC { let inbound_limiter = inbound_rate_limiter_config.map(|config| { debug!(log, "Using inbound rate limiting params"; "config" => ?config); - RateLimiter::new_with_config(config.0) + RateLimiter::new_with_config(config.0, fork_context.clone()) .expect("Inbound limiter configuration parameters are valid") }); let self_limiter = outbound_rate_limiter_config.map(|config| { - 
SelfRateLimiter::new(config, log.clone()).expect("Configuration parameters are valid") + SelfRateLimiter::new(config, fork_context.clone(), log.clone()) + .expect("Configuration parameters are valid") }); RPC { diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs index 87bde58292..681b739d59 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -86,6 +86,10 @@ pub static SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD: LazyLock = LazyL /// We calculate the value from its fields instead of constructing the block and checking the length. /// Note: This is only the theoretical upper bound. We further bound the max size we receive over the network /// with `max_chunk_size`. +/// +/// FIXME: Given that these limits are useless we should probably delete them. See: +/// +/// https://github.com/sigp/lighthouse/issues/6790 pub static SIGNED_BEACON_BLOCK_BELLATRIX_MAX: LazyLock = LazyLock::new(|| // Size of a full altair block *SIGNED_BEACON_BLOCK_ALTAIR_MAX @@ -102,7 +106,6 @@ pub static SIGNED_BEACON_BLOCK_DENEB_MAX: LazyLock = LazyLock::new(|| { *SIGNED_BEACON_BLOCK_CAPELLA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_deneb_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional offsets for the `ExecutionPayload` - + (::ssz_fixed_len() * ::max_blobs_per_block()) + ssz::BYTES_PER_LENGTH_OFFSET }); // Length offset for the blob commitments field. 
// @@ -110,7 +113,6 @@ pub static SIGNED_BEACON_BLOCK_ELECTRA_MAX: LazyLock = LazyLock::new(|| { *SIGNED_BEACON_BLOCK_ELECTRA_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_electra_size() // adding max size of execution payload (~16gb) + ssz::BYTES_PER_LENGTH_OFFSET // Adding the additional ssz offset for the `ExecutionPayload` field - + (::ssz_fixed_len() * ::max_blobs_per_block()) + ssz::BYTES_PER_LENGTH_OFFSET }); // Length offset for the blob commitments field. @@ -118,8 +120,6 @@ pub static SIGNED_BEACON_BLOCK_FULU_MAX: LazyLock = LazyLock::new(|| { *SIGNED_BEACON_BLOCK_FULU_MAX_WITHOUT_PAYLOAD + types::ExecutionPayload::::max_execution_payload_fulu_size() + ssz::BYTES_PER_LENGTH_OFFSET - + (::ssz_fixed_len() - * ::max_blobs_per_block()) + ssz::BYTES_PER_LENGTH_OFFSET }); @@ -129,14 +129,6 @@ pub static BLOB_SIDECAR_SIZE: LazyLock = pub static BLOB_SIDECAR_SIZE_MINIMAL: LazyLock = LazyLock::new(BlobSidecar::::max_size); -pub static DATA_COLUMNS_SIDECAR_MIN: LazyLock = LazyLock::new(|| { - DataColumnSidecar::::empty() - .as_ssz_bytes() - .len() -}); -pub static DATA_COLUMNS_SIDECAR_MAX: LazyLock = - LazyLock::new(DataColumnSidecar::::max_size); - pub static ERROR_TYPE_MIN: LazyLock = LazyLock::new(|| { VariableList::::from(Vec::::new()) .as_ssz_bytes() @@ -635,8 +627,10 @@ impl ProtocolId { Protocol::BlocksByRoot => rpc_block_limits_by_fork(fork_context.current_fork()), Protocol::BlobsByRange => rpc_blob_limits::(), Protocol::BlobsByRoot => rpc_blob_limits::(), - Protocol::DataColumnsByRoot => rpc_data_column_limits(), - Protocol::DataColumnsByRange => rpc_data_column_limits(), + Protocol::DataColumnsByRoot => rpc_data_column_limits::(fork_context.current_fork()), + Protocol::DataColumnsByRange => { + rpc_data_column_limits::(fork_context.current_fork()) + } Protocol::Ping => RpcLimits::new( ::ssz_fixed_len(), ::ssz_fixed_len(), @@ -716,8 +710,14 @@ pub fn rpc_blob_limits() -> RpcLimits { } } -pub fn rpc_data_column_limits() -> RpcLimits 
{ - RpcLimits::new(*DATA_COLUMNS_SIDECAR_MIN, *DATA_COLUMNS_SIDECAR_MAX) +// TODO(peerdas): fix hardcoded max here +pub fn rpc_data_column_limits(fork_name: ForkName) -> RpcLimits { + RpcLimits::new( + DataColumnSidecar::::empty().as_ssz_bytes().len(), + DataColumnSidecar::::max_size( + E::default_spec().max_blobs_per_block_by_fork(fork_name) as usize + ), + ) } /* Inbound upgrade */ @@ -815,13 +815,13 @@ impl RequestType { /* These functions are used in the handler for stream management */ /// Maximum number of responses expected for this request. - pub fn max_responses(&self) -> u64 { + pub fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { match self { RequestType::Status(_) => 1, RequestType::Goodbye(_) => 0, RequestType::BlocksByRange(req) => *req.count(), RequestType::BlocksByRoot(req) => req.block_roots().len() as u64, - RequestType::BlobsByRange(req) => req.max_blobs_requested::(), + RequestType::BlobsByRange(req) => req.max_blobs_requested(current_fork, spec), RequestType::BlobsByRoot(req) => req.blob_ids.len() as u64, RequestType::DataColumnsByRoot(req) => req.data_column_ids.len() as u64, RequestType::DataColumnsByRange(req) => req.max_requested::(), diff --git a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs index ecbacc8c11..b9e82a5f1e 100644 --- a/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/rate_limiter.rs @@ -6,10 +6,11 @@ use serde::{Deserialize, Serialize}; use std::future::Future; use std::hash::Hash; use std::pin::Pin; +use std::sync::Arc; use std::task::{Context, Poll}; use std::time::{Duration, Instant}; use tokio::time::Interval; -use types::EthSpec; +use types::{ChainSpec, EthSpec, ForkContext, ForkName}; /// Nanoseconds since a given time. 
// Maintained as u64 to reduce footprint @@ -109,6 +110,7 @@ pub struct RPCRateLimiter { lc_finality_update_rl: Limiter, /// LightClientUpdatesByRange rate limiter. lc_updates_by_range_rl: Limiter, + fork_context: Arc, } /// Error type for non conformant requests @@ -176,7 +178,7 @@ impl RPCRateLimiterBuilder { self } - pub fn build(self) -> Result { + pub fn build(self, fork_context: Arc) -> Result { // get our quotas let ping_quota = self.ping_quota.ok_or("Ping quota not specified")?; let metadata_quota = self.metadata_quota.ok_or("MetaData quota not specified")?; @@ -253,13 +255,14 @@ impl RPCRateLimiterBuilder { lc_finality_update_rl, lc_updates_by_range_rl, init_time: Instant::now(), + fork_context, }) } } pub trait RateLimiterItem { fn protocol(&self) -> Protocol; - fn max_responses(&self) -> u64; + fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64; } impl RateLimiterItem for super::RequestType { @@ -267,13 +270,16 @@ impl RateLimiterItem for super::RequestType { self.versioned_protocol().protocol() } - fn max_responses(&self) -> u64 { - self.max_responses() + fn max_responses(&self, current_fork: ForkName, spec: &ChainSpec) -> u64 { + self.max_responses(current_fork, spec) } } impl RPCRateLimiter { - pub fn new_with_config(config: RateLimiterConfig) -> Result { + pub fn new_with_config( + config: RateLimiterConfig, + fork_context: Arc, + ) -> Result { // Destructure to make sure every configuration value is used. let RateLimiterConfig { ping_quota, @@ -316,7 +322,7 @@ impl RPCRateLimiter { Protocol::LightClientUpdatesByRange, light_client_updates_by_range_quota, ) - .build() + .build(fork_context) } /// Get a builder instance. 
@@ -330,7 +336,9 @@ impl RPCRateLimiter { request: &Item, ) -> Result<(), RateLimitedErr> { let time_since_start = self.init_time.elapsed(); - let tokens = request.max_responses().max(1); + let tokens = request + .max_responses(self.fork_context.current_fork(), &self.fork_context.spec) + .max(1); let check = |limiter: &mut Limiter| limiter.allows(time_since_start, peer_id, tokens); diff --git a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs index e968ad11e3..e0c8593f29 100644 --- a/beacon_node/lighthouse_network/src/rpc/self_limiter.rs +++ b/beacon_node/lighthouse_network/src/rpc/self_limiter.rs @@ -1,5 +1,6 @@ use std::{ collections::{hash_map::Entry, HashMap, VecDeque}, + sync::Arc, task::{Context, Poll}, time::Duration, }; @@ -9,7 +10,7 @@ use libp2p::{swarm::NotifyHandler, PeerId}; use slog::{crit, debug, Logger}; use smallvec::SmallVec; use tokio_util::time::DelayQueue; -use types::EthSpec; +use types::{EthSpec, ForkContext}; use super::{ config::OutboundRateLimiterConfig, @@ -50,9 +51,13 @@ pub enum Error { impl SelfRateLimiter { /// Creates a new [`SelfRateLimiter`] based on configration values. - pub fn new(config: OutboundRateLimiterConfig, log: Logger) -> Result { + pub fn new( + config: OutboundRateLimiterConfig, + fork_context: Arc, + log: Logger, + ) -> Result { debug!(log, "Using self rate limiting params"; "config" => ?config); - let limiter = RateLimiter::new_with_config(config.0)?; + let limiter = RateLimiter::new_with_config(config.0, fork_context)?; Ok(SelfRateLimiter { delayed_requests: Default::default(), @@ -215,7 +220,7 @@ mod tests { use crate::service::api_types::{AppRequestId, RequestId, SyncRequestId}; use libp2p::PeerId; use std::time::Duration; - use types::MainnetEthSpec; + use types::{EthSpec, ForkContext, Hash256, MainnetEthSpec, Slot}; /// Test that `next_peer_request_ready` correctly maintains the queue. 
#[tokio::test] @@ -225,8 +230,13 @@ mod tests { ping_quota: Quota::n_every(1, 2), ..Default::default() }); + let fork_context = std::sync::Arc::new(ForkContext::new::( + Slot::new(0), + Hash256::ZERO, + &MainnetEthSpec::default_spec(), + )); let mut limiter: SelfRateLimiter = - SelfRateLimiter::new(config, log).unwrap(); + SelfRateLimiter::new(config, fork_context, log).unwrap(); let peer_id = PeerId::random(); for i in 1..=5u32 { diff --git a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs index c4944078fe..b4f19f668d 100644 --- a/beacon_node/network/src/network_beacon_processor/rpc_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/rpc_methods.rs @@ -890,14 +890,6 @@ impl NetworkBeaconProcessor { "start_slot" => req.start_slot, ); - // Should not send more than max request blocks - if req.max_blobs_requested::() > self.chain.spec.max_request_blob_sidecars { - return Err(( - RpcErrorResponse::InvalidRequest, - "Request exceeded `MAX_REQUEST_BLOBS_SIDECARS`", - )); - } - let request_start_slot = Slot::from(req.start_slot); let data_availability_boundary_slot = match self.chain.data_availability_boundary() { diff --git a/beacon_node/network/src/network_beacon_processor/tests.rs b/beacon_node/network/src/network_beacon_processor/tests.rs index 7e27a91bd6..8238fa146d 100644 --- a/beacon_node/network/src/network_beacon_processor/tests.rs +++ b/beacon_node/network/src/network_beacon_processor/tests.rs @@ -259,7 +259,7 @@ impl TestRig { assert!(beacon_processor.is_ok()); let block = next_block_tuple.0; let blob_sidecars = if let Some((kzg_proofs, blobs)) = next_block_tuple.1 { - Some(BlobSidecar::build_sidecars(blobs, &block, kzg_proofs).unwrap()) + Some(BlobSidecar::build_sidecars(blobs, &block, kzg_proofs, &chain.spec).unwrap()) } else { None }; @@ -344,7 +344,7 @@ impl TestRig { } pub fn enqueue_single_lookup_rpc_blobs(&self) { if let Some(blobs) = self.next_blobs.clone() 
{ - let blobs = FixedBlobSidecarList::from(blobs.into_iter().map(Some).collect::>()); + let blobs = FixedBlobSidecarList::new(blobs.into_iter().map(Some).collect::>()); self.network_beacon_processor .send_rpc_blobs( self.next_block.canonical_root(), @@ -1130,7 +1130,12 @@ async fn test_blobs_by_range() { .block_root_at_slot(Slot::new(slot), WhenSlotSkipped::None) .unwrap(); blob_count += root - .map(|root| rig.chain.get_blobs(&root).unwrap_or_default().len()) + .map(|root| { + rig.chain + .get_blobs(&root) + .map(|list| list.len()) + .unwrap_or(0) + }) .unwrap_or(0); } let mut actual_count = 0; diff --git a/beacon_node/network/src/sync/block_sidecar_coupling.rs b/beacon_node/network/src/sync/block_sidecar_coupling.rs index 966ce55fab..7a234eaef0 100644 --- a/beacon_node/network/src/sync/block_sidecar_coupling.rs +++ b/beacon_node/network/src/sync/block_sidecar_coupling.rs @@ -2,13 +2,13 @@ use beacon_chain::{ block_verification_types::RpcBlock, data_column_verification::CustodyDataColumn, get_block_root, }; use lighthouse_network::PeerId; -use ssz_types::VariableList; use std::{ collections::{HashMap, VecDeque}, sync::Arc, }; use types::{ - BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, EthSpec, Hash256, SignedBeaconBlock, + BlobSidecar, ChainSpec, ColumnIndex, DataColumnSidecar, EthSpec, Hash256, RuntimeVariableList, + SignedBeaconBlock, }; #[derive(Debug)] @@ -31,6 +31,7 @@ pub struct RangeBlockComponentsRequest { num_custody_column_requests: Option, /// The peers the request was made to. 
pub(crate) peer_ids: Vec, + max_blobs_per_block: usize, } impl RangeBlockComponentsRequest { @@ -39,6 +40,7 @@ impl RangeBlockComponentsRequest { expects_custody_columns: Option>, num_custody_column_requests: Option, peer_ids: Vec, + max_blobs_per_block: usize, ) -> Self { Self { blocks: <_>::default(), @@ -51,6 +53,7 @@ impl RangeBlockComponentsRequest { expects_custody_columns, num_custody_column_requests, peer_ids, + max_blobs_per_block, } } @@ -100,7 +103,7 @@ impl RangeBlockComponentsRequest { let mut responses = Vec::with_capacity(blocks.len()); let mut blob_iter = blobs.into_iter().peekable(); for block in blocks.into_iter() { - let mut blob_list = Vec::with_capacity(E::max_blobs_per_block()); + let mut blob_list = Vec::with_capacity(self.max_blobs_per_block); while { let pair_next_blob = blob_iter .peek() @@ -111,7 +114,7 @@ impl RangeBlockComponentsRequest { blob_list.push(blob_iter.next().ok_or("Missing next blob".to_string())?); } - let mut blobs_buffer = vec![None; E::max_blobs_per_block()]; + let mut blobs_buffer = vec![None; self.max_blobs_per_block]; for blob in blob_list { let blob_index = blob.index as usize; let Some(blob_opt) = blobs_buffer.get_mut(blob_index) else { @@ -123,7 +126,11 @@ impl RangeBlockComponentsRequest { *blob_opt = Some(blob); } } - let blobs = VariableList::from(blobs_buffer.into_iter().flatten().collect::>()); + let blobs = RuntimeVariableList::new( + blobs_buffer.into_iter().flatten().collect::>(), + self.max_blobs_per_block, + ) + .map_err(|_| "Blobs returned exceeds max length".to_string())?; responses.push(RpcBlock::new(None, block, Some(blobs)).map_err(|e| format!("{e:?}"))?) 
} @@ -245,12 +252,18 @@ mod tests { #[test] fn no_blobs_into_responses() { + let spec = test_spec::(); let peer_id = PeerId::random(); - let mut info = RangeBlockComponentsRequest::::new(false, None, None, vec![peer_id]); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) - .map(|_| generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng).0) + .map(|_| { + generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng, &spec) + .0 + }) .collect::>(); + let max_len = spec.max_blobs_per_block(blocks.first().unwrap().epoch()) as usize; + let mut info = + RangeBlockComponentsRequest::::new(false, None, None, vec![peer_id], max_len); // Send blocks and complete terminate response for block in blocks { @@ -265,15 +278,24 @@ mod tests { #[test] fn empty_blobs_into_responses() { + let spec = test_spec::(); let peer_id = PeerId::random(); - let mut info = RangeBlockComponentsRequest::::new(true, None, None, vec![peer_id]); let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { // Always generate some blobs. 
- generate_rand_block_and_blobs::(ForkName::Deneb, NumBlobs::Number(3), &mut rng).0 + generate_rand_block_and_blobs::( + ForkName::Deneb, + NumBlobs::Number(3), + &mut rng, + &spec, + ) + .0 }) .collect::>(); + let max_len = spec.max_blobs_per_block(blocks.first().unwrap().epoch()) as usize; + let mut info = + RangeBlockComponentsRequest::::new(true, None, None, vec![peer_id], max_len); // Send blocks and complete terminate response for block in blocks { @@ -294,12 +316,7 @@ mod tests { fn rpc_block_with_custody_columns() { let spec = test_spec::(); let expects_custody_columns = vec![1, 2, 3, 4]; - let mut info = RangeBlockComponentsRequest::::new( - false, - Some(expects_custody_columns.clone()), - Some(expects_custody_columns.len()), - vec![PeerId::random()], - ); + let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { @@ -311,7 +328,14 @@ mod tests { ) }) .collect::>(); - + let max_len = spec.max_blobs_per_block(blocks.first().unwrap().0.epoch()) as usize; + let mut info = RangeBlockComponentsRequest::::new( + false, + Some(expects_custody_columns.clone()), + Some(expects_custody_columns.len()), + vec![PeerId::random()], + max_len, + ); // Send blocks and complete terminate response for block in &blocks { info.add_block_response(Some(block.0.clone().into())); @@ -355,12 +379,7 @@ mod tests { let spec = test_spec::(); let expects_custody_columns = vec![1, 2, 3, 4]; let num_of_data_column_requests = 2; - let mut info = RangeBlockComponentsRequest::::new( - false, - Some(expects_custody_columns.clone()), - Some(num_of_data_column_requests), - vec![PeerId::random()], - ); + let mut rng = XorShiftRng::from_seed([42; 16]); let blocks = (0..4) .map(|_| { @@ -372,7 +391,14 @@ mod tests { ) }) .collect::>(); - + let max_len = spec.max_blobs_per_block(blocks.first().unwrap().0.epoch()) as usize; + let mut info = RangeBlockComponentsRequest::::new( + false, + Some(expects_custody_columns.clone()), + Some(num_of_data_column_requests), + 
vec![PeerId::random()], + max_len, + ); // Send blocks and complete terminate response for block in &blocks { info.add_block_response(Some(block.0.clone().into())); diff --git a/beacon_node/network/src/sync/manager.rs b/beacon_node/network/src/sync/manager.rs index 5d02be2b4c..2df8b5f94c 100644 --- a/beacon_node/network/src/sync/manager.rs +++ b/beacon_node/network/src/sync/manager.rs @@ -1234,6 +1234,7 @@ impl SyncManager { .network .range_block_and_blob_response(id, block_or_blob) { + let epoch = resp.sender_id.batch_id(); match resp.responses { Ok(blocks) => { match resp.sender_id { @@ -1277,6 +1278,7 @@ impl SyncManager { resp.expects_custody_columns, None, vec![], + self.chain.spec.max_blobs_per_block(epoch) as usize, ), ); // inform range that the request needs to be treated as failed diff --git a/beacon_node/network/src/sync/network_context.rs b/beacon_node/network/src/sync/network_context.rs index b6b7b315f3..e1b2b974ec 100644 --- a/beacon_node/network/src/sync/network_context.rs +++ b/beacon_node/network/src/sync/network_context.rs @@ -67,6 +67,15 @@ pub enum RangeRequestId { }, } +impl RangeRequestId { + pub fn batch_id(&self) -> BatchId { + match self { + RangeRequestId::RangeSync { batch_id, .. } => *batch_id, + RangeRequestId::BackfillSync { batch_id, .. 
} => *batch_id, + } + } +} + #[derive(Debug)] pub enum RpcEvent { StreamTermination, @@ -445,11 +454,14 @@ impl SyncNetworkContext { (None, None) }; + // TODO(pawan): this would break if a batch contains multiple epochs + let max_blobs_len = self.chain.spec.max_blobs_per_block(epoch); let info = RangeBlockComponentsRequest::new( expected_blobs, expects_columns, num_of_column_req, requested_peers, + max_blobs_len as usize, ); self.range_block_components_requests .insert(id, (sender_id, info)); @@ -950,12 +962,23 @@ impl SyncNetworkContext { ) -> Option>> { let response = self.blobs_by_root_requests.on_response(id, rpc_event); let response = response.map(|res| { - res.and_then( - |(blobs, seen_timestamp)| match to_fixed_blob_sidecar_list(blobs) { - Ok(blobs) => Ok((blobs, seen_timestamp)), - Err(e) => Err(e.into()), - }, - ) + res.and_then(|(blobs, seen_timestamp)| { + if let Some(max_len) = blobs + .first() + .map(|blob| self.chain.spec.max_blobs_per_block(blob.epoch()) as usize) + { + match to_fixed_blob_sidecar_list(blobs, max_len) { + Ok(blobs) => Ok((blobs, seen_timestamp)), + Err(e) => Err(e.into()), + } + } else { + Err(RpcResponseError::VerifyError( + LookupVerifyError::InternalError( + "Requested blobs for a block that has no blobs".to_string(), + ), + )) + } + }) }); if let Some(Err(RpcResponseError::VerifyError(e))) = &response { self.report_peer(peer_id, PeerAction::LowToleranceError, e.into()); @@ -1150,8 +1173,9 @@ impl SyncNetworkContext { fn to_fixed_blob_sidecar_list( blobs: Vec>>, + max_len: usize, ) -> Result, LookupVerifyError> { - let mut fixed_list = FixedBlobSidecarList::default(); + let mut fixed_list = FixedBlobSidecarList::new(vec![None; max_len]); for blob in blobs.into_iter() { let index = blob.index as usize; *fixed_list diff --git a/beacon_node/network/src/sync/network_context/requests.rs b/beacon_node/network/src/sync/network_context/requests.rs index b9214bafcd..4a5a16459d 100644 --- 
a/beacon_node/network/src/sync/network_context/requests.rs +++ b/beacon_node/network/src/sync/network_context/requests.rs @@ -28,6 +28,7 @@ pub enum LookupVerifyError { UnrequestedIndex(u64), InvalidInclusionProof, DuplicateData, + InternalError(String), } /// Collection of active requests of a single ReqResp method, i.e. `blocks_by_root` diff --git a/beacon_node/network/src/sync/tests/lookups.rs b/beacon_node/network/src/sync/tests/lookups.rs index a43b3bd022..b9e38237c5 100644 --- a/beacon_node/network/src/sync/tests/lookups.rs +++ b/beacon_node/network/src/sync/tests/lookups.rs @@ -119,6 +119,8 @@ impl TestRig { .network_globals .set_sync_state(SyncState::Synced); + let spec = chain.spec.clone(); + let rng = XorShiftRng::from_seed([42; 16]); TestRig { beacon_processor_rx, @@ -142,6 +144,7 @@ impl TestRig { harness, fork_name, log, + spec, } } @@ -213,7 +216,7 @@ impl TestRig { ) -> (SignedBeaconBlock, Vec>) { let fork_name = self.fork_name; let rng = &mut self.rng; - generate_rand_block_and_blobs::(fork_name, num_blobs, rng) + generate_rand_block_and_blobs::(fork_name, num_blobs, rng, &self.spec) } fn rand_block_and_data_columns( @@ -1328,8 +1331,10 @@ impl TestRig { #[test] fn stable_rng() { + let spec = types::MainnetEthSpec::default_spec(); let mut rng = XorShiftRng::from_seed([42; 16]); - let (block, _) = generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng); + let (block, _) = + generate_rand_block_and_blobs::(ForkName::Base, NumBlobs::None, &mut rng, &spec); assert_eq!( block.canonical_root(), Hash256::from_slice( @@ -2187,8 +2192,8 @@ mod deneb_only { block_verification_types::{AsBlock, RpcBlock}, data_availability_checker::AvailabilityCheckError, }; - use ssz_types::VariableList; use std::collections::VecDeque; + use types::RuntimeVariableList; struct DenebTester { rig: TestRig, @@ -2546,12 +2551,15 @@ mod deneb_only { fn parent_block_unknown_parent(mut self) -> Self { self.rig.log("parent_block_unknown_parent"); let block = 
self.unknown_parent_block.take().unwrap(); + let max_len = self.rig.spec.max_blobs_per_block(block.epoch()) as usize; // Now this block is the one we expect requests from self.block = block.clone(); let block = RpcBlock::new( Some(block.canonical_root()), block, - self.unknown_parent_blobs.take().map(VariableList::from), + self.unknown_parent_blobs + .take() + .map(|vec| RuntimeVariableList::from_vec(vec, max_len)), ) .unwrap(); self.rig.parent_block_processed( diff --git a/beacon_node/network/src/sync/tests/mod.rs b/beacon_node/network/src/sync/tests/mod.rs index 47666b413c..6ed5c7f8fa 100644 --- a/beacon_node/network/src/sync/tests/mod.rs +++ b/beacon_node/network/src/sync/tests/mod.rs @@ -12,7 +12,7 @@ use slot_clock::ManualSlotClock; use std::sync::Arc; use store::MemoryStore; use tokio::sync::mpsc; -use types::{test_utils::XorShiftRng, ForkName, MinimalEthSpec as E}; +use types::{test_utils::XorShiftRng, ChainSpec, ForkName, MinimalEthSpec as E}; mod lookups; mod range; @@ -64,4 +64,5 @@ struct TestRig { rng: XorShiftRng, fork_name: ForkName, log: Logger, + spec: Arc, } diff --git a/beacon_node/store/src/blob_sidecar_list_from_root.rs b/beacon_node/store/src/blob_sidecar_list_from_root.rs new file mode 100644 index 0000000000..de63eaa76c --- /dev/null +++ b/beacon_node/store/src/blob_sidecar_list_from_root.rs @@ -0,0 +1,42 @@ +use std::sync::Arc; +use types::{BlobSidecar, BlobSidecarList, EthSpec}; + +#[derive(Debug, Clone)] +pub enum BlobSidecarListFromRoot { + /// Valid root that exists in the DB, but has no blobs associated with it. + NoBlobs, + /// Contains > 1 blob for the requested root. + Blobs(BlobSidecarList), + /// No root exists in the db or cache for the requested root. 
+ NoRoot, +} + +impl From> for BlobSidecarListFromRoot { + fn from(value: BlobSidecarList) -> Self { + Self::Blobs(value) + } +} + +impl BlobSidecarListFromRoot { + pub fn blobs(self) -> Option> { + match self { + Self::NoBlobs | Self::NoRoot => None, + Self::Blobs(blobs) => Some(blobs), + } + } + + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + match self { + Self::NoBlobs | Self::NoRoot => 0, + Self::Blobs(blobs) => blobs.len(), + } + } + + pub fn iter(&self) -> impl Iterator>> { + match self { + Self::NoBlobs | Self::NoRoot => [].iter(), + Self::Blobs(list) => list.iter(), + } + } +} diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index c6148e5314..c29305f983 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -14,8 +14,8 @@ use crate::metadata::{ }; use crate::state_cache::{PutStateOutcome, StateCache}; use crate::{ - get_data_column_key, get_key_for_col, DBColumn, DatabaseBlock, Error, ItemStore, - KeyValueStoreOp, StoreItem, StoreOp, + get_data_column_key, get_key_for_col, BlobSidecarListFromRoot, DBColumn, DatabaseBlock, Error, + ItemStore, KeyValueStoreOp, StoreItem, StoreOp, }; use crate::{metrics, parse_data_column_key}; use itertools::{process_results, Itertools}; @@ -1280,9 +1280,10 @@ impl, Cold: ItemStore> HotColdDB StoreOp::PutBlobs(_, _) | StoreOp::PutDataColumns(_, _) => true, StoreOp::DeleteBlobs(block_root) => { match self.get_blobs(block_root) { - Ok(Some(blob_sidecar_list)) => { + Ok(BlobSidecarListFromRoot::Blobs(blob_sidecar_list)) => { blobs_to_delete.push((*block_root, blob_sidecar_list)); } + Ok(BlobSidecarListFromRoot::NoBlobs | BlobSidecarListFromRoot::NoRoot) => {} Err(e) => { error!( self.log, "Error getting blobs"; @@ -1290,7 +1291,6 @@ impl, Cold: ItemStore> HotColdDB "error" => ?e ); } - _ => (), } true } @@ -2045,11 +2045,11 @@ impl, Cold: ItemStore> HotColdDB } /// Fetch blobs for a given block from the store. 
- pub fn get_blobs(&self, block_root: &Hash256) -> Result>, Error> { + pub fn get_blobs(&self, block_root: &Hash256) -> Result, Error> { // Check the cache. if let Some(blobs) = self.block_cache.lock().get_blobs(block_root) { metrics::inc_counter(&metrics::BEACON_BLOBS_CACHE_HIT_COUNT); - return Ok(Some(blobs.clone())); + return Ok(blobs.clone().into()); } match self @@ -2057,13 +2057,27 @@ impl, Cold: ItemStore> HotColdDB .get_bytes(DBColumn::BeaconBlob.into(), block_root.as_slice())? { Some(ref blobs_bytes) => { - let blobs = BlobSidecarList::from_ssz_bytes(blobs_bytes)?; - self.block_cache - .lock() - .put_blobs(*block_root, blobs.clone()); - Ok(Some(blobs)) + // We insert a VariableList of BlobSidecars into the db, but retrieve + // a plain vec since we don't know the length limit of the list without + // knowing the slot. + // The encoding of a VariableList is the same as a regular vec. + let blobs: Vec>> = Vec::<_>::from_ssz_bytes(blobs_bytes)?; + if let Some(max_blobs_per_block) = blobs + .first() + .map(|blob| self.spec.max_blobs_per_block(blob.epoch())) + { + let blobs = BlobSidecarList::from_vec(blobs, max_blobs_per_block as usize); + self.block_cache + .lock() + .put_blobs(*block_root, blobs.clone()); + + Ok(BlobSidecarListFromRoot::Blobs(blobs)) + } else { + // This always implies that there were no blobs for this block_root + Ok(BlobSidecarListFromRoot::NoBlobs) + } } - None => Ok(None), + None => Ok(BlobSidecarListFromRoot::NoRoot), } } diff --git a/beacon_node/store/src/impls/execution_payload.rs b/beacon_node/store/src/impls/execution_payload.rs index 5c60aa8d7e..097b069a66 100644 --- a/beacon_node/store/src/impls/execution_payload.rs +++ b/beacon_node/store/src/impls/execution_payload.rs @@ -1,7 +1,7 @@ use crate::{DBColumn, Error, StoreItem}; use ssz::{Decode, Encode}; use types::{ - BlobSidecarList, EthSpec, ExecutionPayload, ExecutionPayloadBellatrix, ExecutionPayloadCapella, + EthSpec, ExecutionPayload, ExecutionPayloadBellatrix, 
ExecutionPayloadCapella, ExecutionPayloadDeneb, ExecutionPayloadElectra, ExecutionPayloadFulu, }; @@ -27,7 +27,6 @@ impl_store_item!(ExecutionPayloadCapella); impl_store_item!(ExecutionPayloadDeneb); impl_store_item!(ExecutionPayloadElectra); impl_store_item!(ExecutionPayloadFulu); -impl_store_item!(BlobSidecarList); /// This fork-agnostic implementation should be only used for writing. /// diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 09ae9a32dd..1458fa846c 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -7,6 +7,7 @@ //! //! Provides a simple API for storing/retrieving all types that sometimes needs type-hints. See //! tests for implementation examples. +pub mod blob_sidecar_list_from_root; pub mod chunked_iter; pub mod chunked_vector; pub mod config; @@ -28,6 +29,7 @@ pub mod state_cache; pub mod iter; +pub use self::blob_sidecar_list_from_root::BlobSidecarListFromRoot; pub use self::config::StoreConfig; pub use self::consensus_context::OnDiskConsensusContext; pub use self::hot_cold_store::{HotColdDB, HotStateSummary, Split}; diff --git a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml index a107f6147a..a303bea268 100644 --- a/common/eth2_network_config/built_in_network_configs/chiado/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/chiado/config.yaml @@ -135,6 +135,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml index f71984059a..68d2b0eafe 100644 --- a/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml +++ 
b/common/eth2_network_config/built_in_network_configs/gnosis/config.yaml @@ -118,6 +118,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 16384 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml index 6d344b5b52..930ce0a1bc 100644 --- a/common/eth2_network_config/built_in_network_configs/holesky/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/holesky/config.yaml @@ -124,6 +124,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml index 244ddd564d..638f6fe42f 100644 --- a/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/mainnet/config.yaml @@ -141,6 +141,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml index 88f8359bd1..3818518897 100644 --- a/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml +++ b/common/eth2_network_config/built_in_network_configs/sepolia/config.yaml @@ -119,6 +119,8 @@ MAX_REQUEST_BLOB_SIDECARS: 768 MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS: 4096 # `6` BLOB_SIDECAR_SUBNET_COUNT: 6 +# `uint64(6)` +MAX_BLOBS_PER_BLOCK: 6 # DAS CUSTODY_REQUIREMENT: 4 diff --git a/consensus/state_processing/src/per_block_processing.rs 
b/consensus/state_processing/src/per_block_processing.rs index 22e0a5eab3..782dbe2a54 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -391,10 +391,12 @@ pub fn partially_verify_execution_payload = VariableList::MaxBlobCommitmentsPerBlock>; -pub type KzgCommitmentOpts = - FixedVector, ::MaxBlobsPerBlock>; /// The number of leaves (including padding) on the `BeaconBlockBody` Merkle tree. /// diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 302aa2a4c1..ff4555747c 100644 --- a/consensus/types/src/blob_sidecar.rs +++ b/consensus/types/src/blob_sidecar.rs @@ -1,10 +1,10 @@ use crate::test_utils::TestRandom; use crate::{ - beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, - Epoch, EthSpec, FixedVector, Hash256, SignedBeaconBlockHeader, Slot, VariableList, + beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, AbstractExecPayload, BeaconBlockHeader, + BeaconStateError, Blob, ChainSpec, Epoch, EthSpec, FixedVector, ForkName, + ForkVersionDeserialize, Hash256, KzgProofs, RuntimeFixedVector, RuntimeVariableList, + SignedBeaconBlock, SignedBeaconBlockHeader, Slot, VariableList, }; -use crate::{AbstractExecPayload, ForkName}; -use crate::{ForkVersionDeserialize, KzgProofs, SignedBeaconBlock}; use bls::Signature; use derivative::Derivative; use kzg::{Blob as KzgBlob, Kzg, KzgCommitment, KzgProof, BYTES_PER_BLOB, BYTES_PER_FIELD_ELEMENT}; @@ -30,19 +30,6 @@ pub struct BlobIdentifier { pub index: u64, } -impl BlobIdentifier { - pub fn get_all_blob_ids(block_root: Hash256) -> Vec { - let mut blob_ids = Vec::with_capacity(E::max_blobs_per_block()); - for i in 0..E::max_blobs_per_block() { - blob_ids.push(BlobIdentifier { - block_root, - index: i as u64, - }); - } - blob_ids - } -} - impl PartialOrd for BlobIdentifier { fn partial_cmp(&self, other: &Self) -> Option { Some(self.cmp(other)) @@ -291,19 +278,23 @@ impl 
BlobSidecar { blobs: BlobsList, block: &SignedBeaconBlock, kzg_proofs: KzgProofs, + spec: &ChainSpec, ) -> Result, BlobSidecarError> { let mut blob_sidecars = vec![]; for (i, (kzg_proof, blob)) in kzg_proofs.iter().zip(blobs).enumerate() { let blob_sidecar = BlobSidecar::new(i, blob, block, *kzg_proof)?; blob_sidecars.push(Arc::new(blob_sidecar)); } - Ok(VariableList::from(blob_sidecars)) + Ok(RuntimeVariableList::from_vec( + blob_sidecars, + spec.max_blobs_per_block(block.epoch()) as usize, + )) } } -pub type BlobSidecarList = VariableList>, ::MaxBlobsPerBlock>; -pub type FixedBlobSidecarList = - FixedVector>>, ::MaxBlobsPerBlock>; +pub type BlobSidecarList = RuntimeVariableList>>; +/// Alias for a non length-constrained list of `BlobSidecar`s. +pub type FixedBlobSidecarList = RuntimeFixedVector>>>; pub type BlobsList = VariableList, ::MaxBlobCommitmentsPerBlock>; impl ForkVersionDeserialize for BlobSidecarList { diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index f0bfeba680..65f4c37aa1 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -237,6 +237,7 @@ pub struct ChainSpec { pub max_request_data_column_sidecars: u64, pub min_epochs_for_blob_sidecars_requests: u64, pub blob_sidecar_subnet_count: u64, + max_blobs_per_block: u64, /* * Networking Derived @@ -616,6 +617,17 @@ impl ChainSpec { } } + /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for the fork at `epoch`. + pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 { + self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch)) + } + + /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`. 
+ pub fn max_blobs_per_block_by_fork(&self, _fork_name: ForkName) -> u64 { + // TODO(electra): add Electra blobs per block change here + self.max_blobs_per_block + } + pub fn data_columns_per_subnet(&self) -> usize { self.number_of_columns .safe_div(self.data_column_sidecar_subnet_count as usize) @@ -859,6 +871,7 @@ impl ChainSpec { max_request_data_column_sidecars: default_max_request_data_column_sidecars(), min_epochs_for_blob_sidecars_requests: default_min_epochs_for_blob_sidecars_requests(), blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), + max_blobs_per_block: default_max_blobs_per_block(), /* * Derived Deneb Specific @@ -1187,6 +1200,7 @@ impl ChainSpec { max_request_data_column_sidecars: default_max_request_data_column_sidecars(), min_epochs_for_blob_sidecars_requests: 16384, blob_sidecar_subnet_count: default_blob_sidecar_subnet_count(), + max_blobs_per_block: default_max_blobs_per_block(), /* * Derived Deneb Specific @@ -1388,6 +1402,9 @@ pub struct Config { #[serde(default = "default_blob_sidecar_subnet_count")] #[serde(with = "serde_utils::quoted_u64")] blob_sidecar_subnet_count: u64, + #[serde(default = "default_max_blobs_per_block")] + #[serde(with = "serde_utils::quoted_u64")] + max_blobs_per_block: u64, #[serde(default = "default_min_per_epoch_churn_limit_electra")] #[serde(with = "serde_utils::quoted_u64")] @@ -1523,6 +1540,12 @@ const fn default_blob_sidecar_subnet_count() -> u64 { 6 } +/// Its important to keep this consistent with the deneb preset value for +/// `MAX_BLOBS_PER_BLOCK` else we might run into consensus issues. 
+const fn default_max_blobs_per_block() -> u64 { + 6 +} + const fn default_min_per_epoch_churn_limit_electra() -> u64 { 128_000_000_000 } @@ -1745,6 +1768,7 @@ impl Config { max_request_data_column_sidecars: spec.max_request_data_column_sidecars, min_epochs_for_blob_sidecars_requests: spec.min_epochs_for_blob_sidecars_requests, blob_sidecar_subnet_count: spec.blob_sidecar_subnet_count, + max_blobs_per_block: spec.max_blobs_per_block, min_per_epoch_churn_limit_electra: spec.min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit: spec @@ -1822,6 +1846,7 @@ impl Config { max_request_data_column_sidecars, min_epochs_for_blob_sidecars_requests, blob_sidecar_subnet_count, + max_blobs_per_block, min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, @@ -1890,6 +1915,7 @@ impl Config { max_request_data_column_sidecars, min_epochs_for_blob_sidecars_requests, blob_sidecar_subnet_count, + max_blobs_per_block, min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, diff --git a/consensus/types/src/data_column_sidecar.rs b/consensus/types/src/data_column_sidecar.rs index 57251e319a..b2a050e9d5 100644 --- a/consensus/types/src/data_column_sidecar.rs +++ b/consensus/types/src/data_column_sidecar.rs @@ -1,7 +1,7 @@ use crate::beacon_block_body::{KzgCommitments, BLOB_KZG_COMMITMENTS_INDEX}; use crate::test_utils::TestRandom; use crate::BeaconStateError; -use crate::{BeaconBlockHeader, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; +use crate::{BeaconBlockHeader, Epoch, EthSpec, Hash256, KzgProofs, SignedBeaconBlockHeader, Slot}; use bls::Signature; use derivative::Derivative; use kzg::Error as KzgError; @@ -11,7 +11,6 @@ use safe_arith::ArithError; use serde::{Deserialize, Serialize}; use ssz::Encode; use ssz_derive::{Decode, Encode}; -use ssz_types::typenum::Unsigned; use ssz_types::Error as SszError; use ssz_types::{FixedVector, VariableList}; use std::hash::Hash; @@ -68,6 +67,10 @@ impl 
DataColumnSidecar { self.signed_block_header.message.slot } + pub fn epoch(&self) -> Epoch { + self.slot().epoch(E::slots_per_epoch()) + } + pub fn block_root(&self) -> Hash256 { self.signed_block_header.message.tree_hash_root() } @@ -110,18 +113,16 @@ impl DataColumnSidecar { .len() } - pub fn max_size() -> usize { + pub fn max_size(max_blobs_per_block: usize) -> usize { Self { index: 0, - column: VariableList::new(vec![Cell::::default(); E::MaxBlobsPerBlock::to_usize()]) - .unwrap(), + column: VariableList::new(vec![Cell::::default(); max_blobs_per_block]).unwrap(), kzg_commitments: VariableList::new(vec![ KzgCommitment::empty_for_testing(); - E::MaxBlobsPerBlock::to_usize() + max_blobs_per_block ]) .unwrap(), - kzg_proofs: VariableList::new(vec![KzgProof::empty(); E::MaxBlobsPerBlock::to_usize()]) - .unwrap(), + kzg_proofs: VariableList::new(vec![KzgProof::empty(); max_blobs_per_block]).unwrap(), signed_block_header: SignedBeaconBlockHeader { message: BeaconBlockHeader::empty(), signature: Signature::empty(), diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 23e8276209..976766dfa9 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -4,8 +4,7 @@ use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ bit::B0, UInt, U0, U1, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U134217728, - U16, U16777216, U2, U2048, U256, U262144, U32, U4, U4096, U512, U6, U625, U64, U65536, U8, - U8192, + U16, U16777216, U2, U2048, U256, U262144, U32, U4, U4096, U512, U625, U64, U65536, U8, U8192, }; use ssz_types::typenum::{U17, U9}; use std::fmt::{self, Debug}; @@ -109,7 +108,6 @@ pub trait EthSpec: /* * New in Deneb */ - type MaxBlobsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; type MaxBlobCommitmentsPerBlock: Unsigned + Clone + Sync + Send + Debug + PartialEq + Unpin; type FieldElementsPerBlob: Unsigned + Clone + Sync + Send + Debug + 
PartialEq; type BytesPerFieldElement: Unsigned + Clone + Sync + Send + Debug + PartialEq; @@ -281,11 +279,6 @@ pub trait EthSpec: Self::MaxWithdrawalsPerPayload::to_usize() } - /// Returns the `MAX_BLOBS_PER_BLOCK` constant for this specification. - fn max_blobs_per_block() -> usize { - Self::MaxBlobsPerBlock::to_usize() - } - /// Returns the `MAX_BLOB_COMMITMENTS_PER_BLOCK` constant for this specification. fn max_blob_commitments_per_block() -> usize { Self::MaxBlobCommitmentsPerBlock::to_usize() @@ -421,7 +414,6 @@ impl EthSpec for MainnetEthSpec { type GasLimitDenominator = U1024; type MinGasLimit = U5000; type MaxExtraDataBytes = U32; - type MaxBlobsPerBlock = U6; type MaxBlobCommitmentsPerBlock = U4096; type BytesPerFieldElement = U32; type FieldElementsPerBlob = U4096; @@ -505,7 +497,6 @@ impl EthSpec for MinimalEthSpec { MinGasLimit, MaxExtraDataBytes, MaxBlsToExecutionChanges, - MaxBlobsPerBlock, BytesPerFieldElement, PendingDepositsLimit, MaxPendingDepositsPerEpoch, @@ -559,7 +550,6 @@ impl EthSpec for GnosisEthSpec { type SlotsPerEth1VotingPeriod = U1024; // 64 epochs * 16 slots per epoch type MaxBlsToExecutionChanges = U16; type MaxWithdrawalsPerPayload = U8; - type MaxBlobsPerBlock = U6; type MaxBlobCommitmentsPerBlock = U4096; type FieldElementsPerBlob = U4096; type BytesPerFieldElement = U32; diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 282f27a517..54d8bf51b6 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -108,6 +108,7 @@ pub mod data_column_sidecar; pub mod data_column_subnet_id; pub mod light_client_header; pub mod non_zero_usize; +pub mod runtime_fixed_vector; pub mod runtime_var_list; pub use crate::activation_queue::ActivationQueue; @@ -223,6 +224,7 @@ pub use crate::preset::{ pub use crate::proposer_preparation_data::ProposerPreparationData; pub use crate::proposer_slashing::ProposerSlashing; pub use crate::relative_epoch::{Error as RelativeEpochError, RelativeEpoch}; +pub use 
crate::runtime_fixed_vector::RuntimeFixedVector; pub use crate::runtime_var_list::RuntimeVariableList; pub use crate::selection_proof::SelectionProof; pub use crate::shuffling_id::AttestationShufflingId; diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index f8b3665409..f64b7051e5 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -205,8 +205,6 @@ impl CapellaPreset { #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)] #[serde(rename_all = "UPPERCASE")] pub struct DenebPreset { - #[serde(with = "serde_utils::quoted_u64")] - pub max_blobs_per_block: u64, #[serde(with = "serde_utils::quoted_u64")] pub max_blob_commitments_per_block: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -216,7 +214,6 @@ pub struct DenebPreset { impl DenebPreset { pub fn from_chain_spec(_spec: &ChainSpec) -> Self { Self { - max_blobs_per_block: E::max_blobs_per_block() as u64, max_blob_commitments_per_block: E::max_blob_commitments_per_block() as u64, field_elements_per_blob: E::field_elements_per_blob() as u64, } diff --git a/consensus/types/src/runtime_fixed_vector.rs b/consensus/types/src/runtime_fixed_vector.rs new file mode 100644 index 0000000000..2b08b7bf70 --- /dev/null +++ b/consensus/types/src/runtime_fixed_vector.rs @@ -0,0 +1,81 @@ +//! Emulates a fixed size array but with the length set at runtime. +//! +//! The length of the list cannot be changed once it is set. 
+ +#[derive(Clone, Debug)] +pub struct RuntimeFixedVector { + vec: Vec, + len: usize, +} + +impl RuntimeFixedVector { + pub fn new(vec: Vec) -> Self { + let len = vec.len(); + Self { vec, len } + } + + pub fn to_vec(&self) -> Vec { + self.vec.clone() + } + + pub fn as_slice(&self) -> &[T] { + self.vec.as_slice() + } + + #[allow(clippy::len_without_is_empty)] + pub fn len(&self) -> usize { + self.len + } + + pub fn into_vec(self) -> Vec { + self.vec + } + + pub fn default(max_len: usize) -> Self { + Self { + vec: vec![T::default(); max_len], + len: max_len, + } + } + + pub fn take(&mut self) -> Self { + let new = std::mem::take(&mut self.vec); + *self = Self::new(vec![T::default(); self.len]); + Self { + vec: new, + len: self.len, + } + } +} + +impl std::ops::Deref for RuntimeFixedVector { + type Target = [T]; + + fn deref(&self) -> &[T] { + &self.vec[..] + } +} + +impl std::ops::DerefMut for RuntimeFixedVector { + fn deref_mut(&mut self) -> &mut [T] { + &mut self.vec[..] + } +} + +impl IntoIterator for RuntimeFixedVector { + type Item = T; + type IntoIter = std::vec::IntoIter; + + fn into_iter(self) -> Self::IntoIter { + self.vec.into_iter() + } +} + +impl<'a, T> IntoIterator for &'a RuntimeFixedVector { + type Item = &'a T; + type IntoIter = std::slice::Iter<'a, T>; + + fn into_iter(self) -> Self::IntoIter { + self.vec.iter() + } +} diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs index 8290876fa1..857073b3b8 100644 --- a/consensus/types/src/runtime_var_list.rs +++ b/consensus/types/src/runtime_var_list.rs @@ -2,7 +2,7 @@ use derivative::Derivative; use serde::{Deserialize, Serialize}; use ssz::Decode; use ssz_types::Error; -use std::ops::{Deref, DerefMut, Index, IndexMut}; +use std::ops::{Deref, Index, IndexMut}; use std::slice::SliceIndex; /// Emulates a SSZ `List`. 
@@ -10,6 +10,8 @@ use std::slice::SliceIndex; /// An ordered, heap-allocated, variable-length, homogeneous collection of `T`, with no more than /// `max_len` values. /// +/// To ensure there are no inconsistent states, we do not allow any mutating operation if `max_len` is not set. +/// /// ## Example /// /// ``` @@ -35,6 +37,7 @@ use std::slice::SliceIndex; /// /// // Push a value to if it _does_ exceed the maximum. /// assert!(long.push(6).is_err()); +/// /// ``` #[derive(Debug, Clone, Serialize, Deserialize, Derivative)] #[derivative(PartialEq, Eq, Hash(bound = "T: std::hash::Hash"))] @@ -65,7 +68,7 @@ impl RuntimeVariableList { Self { vec, max_len } } - /// Create an empty list. + /// Create an empty list with the given `max_len`. pub fn empty(max_len: usize) -> Self { Self { vec: vec![], @@ -77,6 +80,10 @@ impl RuntimeVariableList { self.vec.as_slice() } + pub fn as_mut_slice(&mut self) -> &mut [T] { + self.vec.as_mut_slice() + } + /// Returns the number of values presently in `self`. pub fn len(&self) -> usize { self.vec.len() @@ -88,6 +95,8 @@ impl RuntimeVariableList { } /// Returns the type-level maximum length. + /// + /// Returns `None` if self is uninitialized with a max_len. pub fn max_len(&self) -> usize { self.max_len } @@ -169,12 +178,6 @@ impl Deref for RuntimeVariableList { } } -impl DerefMut for RuntimeVariableList { - fn deref_mut(&mut self) -> &mut [T] { - &mut self.vec[..] 
- } -} - impl<'a, T> IntoIterator for &'a RuntimeVariableList { type Item = &'a T; type IntoIter = std::slice::Iter<'a, T>; diff --git a/lcli/src/mock_el.rs b/lcli/src/mock_el.rs index 7719f02aa3..2e2c27a2db 100644 --- a/lcli/src/mock_el.rs +++ b/lcli/src/mock_el.rs @@ -9,6 +9,7 @@ use execution_layer::{ }; use std::net::Ipv4Addr; use std::path::PathBuf; +use std::sync::Arc; use types::*; pub fn run(mut env: Environment, matches: &ArgMatches) -> Result<(), String> { @@ -22,7 +23,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< let osaka_time = parse_optional(matches, "osaka-time")?; let handle = env.core_context().executor.handle().unwrap(); - let spec = &E::default_spec(); + let spec = Arc::new(E::default_spec()); let jwt_key = JwtKey::from_slice(&DEFAULT_JWT_SECRET).unwrap(); std::fs::write(jwt_path, hex::encode(DEFAULT_JWT_SECRET)).unwrap(); @@ -41,7 +42,7 @@ pub fn run(mut env: Environment, matches: &ArgMatches) -> Result< osaka_time, }; let kzg = None; - let server: MockServer = MockServer::new_with_config(&handle, config, kzg); + let server: MockServer = MockServer::new_with_config(&handle, config, spec, kzg); if all_payloads_valid { eprintln!( diff --git a/testing/node_test_rig/src/lib.rs b/testing/node_test_rig/src/lib.rs index ac01c84b9d..6e632ccf54 100644 --- a/testing/node_test_rig/src/lib.rs +++ b/testing/node_test_rig/src/lib.rs @@ -7,6 +7,7 @@ use environment::RuntimeContext; use eth2::{reqwest::ClientBuilder, BeaconNodeHttpClient, Timeouts}; use sensitive_url::SensitiveUrl; use std::path::PathBuf; +use std::sync::Arc; use std::time::Duration; use std::time::{SystemTime, UNIX_EPOCH}; use tempfile::{Builder as TempBuilder, TempDir}; @@ -248,8 +249,14 @@ impl LocalExecutionNode { if let Err(e) = std::fs::write(jwt_file_path, config.jwt_key.hex_string()) { panic!("Failed to write jwt file {}", e); } + let spec = Arc::new(E::default_spec()); Self { - server: MockServer::new_with_config(&context.executor.handle().unwrap(), config, 
None), + server: MockServer::new_with_config( + &context.executor.handle().unwrap(), + config, + spec, + None, + ), datadir, } } From 348fbdb83816ba213945e5c7fcd8c5c5342aa0ed Mon Sep 17 00:00:00 2001 From: Mac L Date: Fri, 10 Jan 2025 11:35:05 +0400 Subject: [PATCH 11/15] Add missing crates to cargo workspace (#6774) * Add the remaining crates to cargo workspace * Merge branch 'unstable' into add-remaining-crates-workspace --- Cargo.toml | 7 +++- beacon_node/genesis/Cargo.toml | 16 ++++---- beacon_node/operation_pool/Cargo.toml | 14 +++---- common/filesystem/Cargo.toml | 1 - consensus/merkle_proof/Cargo.toml | 2 +- consensus/types/Cargo.toml | 56 +++++++++++++-------------- 6 files changed, 50 insertions(+), 46 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 23e52a306b..233e5fa775 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,11 +9,13 @@ members = [ "beacon_node/client", "beacon_node/eth1", "beacon_node/execution_layer", + "beacon_node/genesis", "beacon_node/http_api", "beacon_node/http_metrics", "beacon_node/lighthouse_network", "beacon_node/lighthouse_network/gossipsub", "beacon_node/network", + "beacon_node/operation_pool", "beacon_node/store", "beacon_node/timer", @@ -30,6 +32,7 @@ members = [ "common/eth2_interop_keypairs", "common/eth2_network_config", "common/eth2_wallet_manager", + "common/filesystem", "common/lighthouse_version", "common/lockfile", "common/logging", @@ -48,14 +51,16 @@ members = [ "common/unused_port", "common/validator_dir", "common/warp_utils", + "consensus/fixed_bytes", "consensus/fork_choice", - "consensus/int_to_bytes", + "consensus/merkle_proof", "consensus/proto_array", "consensus/safe_arith", "consensus/state_processing", "consensus/swap_or_not_shuffle", + "consensus/types", "crypto/bls", "crypto/eth2_key_derivation", diff --git a/beacon_node/genesis/Cargo.toml b/beacon_node/genesis/Cargo.toml index b01e6a6aea..eeca393947 100644 --- a/beacon_node/genesis/Cargo.toml +++ b/beacon_node/genesis/Cargo.toml @@ -9,16 +9,16 @@ 
eth1_test_rig = { workspace = true } sensitive_url = { workspace = true } [dependencies] -futures = { workspace = true } -types = { workspace = true } environment = { workspace = true } eth1 = { workspace = true } +ethereum_hashing = { workspace = true } +ethereum_ssz = { workspace = true } +futures = { workspace = true } +int_to_bytes = { workspace = true } +merkle_proof = { workspace = true } rayon = { workspace = true } +slog = { workspace = true } state_processing = { workspace = true } -merkle_proof = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_hashing = { workspace = true } -tree_hash = { workspace = true } tokio = { workspace = true } -slog = { workspace = true } -int_to_bytes = { workspace = true } +tree_hash = { workspace = true } +types = { workspace = true } diff --git a/beacon_node/operation_pool/Cargo.toml b/beacon_node/operation_pool/Cargo.toml index 5b48e3f0d8..570b74226c 100644 --- a/beacon_node/operation_pool/Cargo.toml +++ b/beacon_node/operation_pool/Cargo.toml @@ -5,24 +5,24 @@ authors = ["Michael Sproul "] edition = { workspace = true } [dependencies] +bitvec = { workspace = true } derivative = { workspace = true } +ethereum_ssz = { workspace = true } +ethereum_ssz_derive = { workspace = true } itertools = { workspace = true } metrics = { workspace = true } parking_lot = { workspace = true } -types = { workspace = true } -state_processing = { workspace = true } -ethereum_ssz = { workspace = true } -ethereum_ssz_derive = { workspace = true } +rand = { workspace = true } rayon = { workspace = true } serde = { workspace = true } +state_processing = { workspace = true } store = { workspace = true } -bitvec = { workspace = true } -rand = { workspace = true } +types = { workspace = true } [dev-dependencies] beacon_chain = { workspace = true } -tokio = { workspace = true } maplit = { workspace = true } +tokio = { workspace = true } [features] portable = ["beacon_chain/portable"] diff --git a/common/filesystem/Cargo.toml 
b/common/filesystem/Cargo.toml index fd026bd517..1b5abf03f4 100644 --- a/common/filesystem/Cargo.toml +++ b/common/filesystem/Cargo.toml @@ -3,7 +3,6 @@ name = "filesystem" version = "0.1.0" authors = ["Mark Mackey "] edition = { workspace = true } - # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] diff --git a/consensus/merkle_proof/Cargo.toml b/consensus/merkle_proof/Cargo.toml index c2c6bf270a..2f721d917b 100644 --- a/consensus/merkle_proof/Cargo.toml +++ b/consensus/merkle_proof/Cargo.toml @@ -7,8 +7,8 @@ edition = { workspace = true } [dependencies] alloy-primitives = { workspace = true } ethereum_hashing = { workspace = true } -safe_arith = { workspace = true } fixed_bytes = { workspace = true } +safe_arith = { workspace = true } [dev-dependencies] quickcheck = { workspace = true } diff --git a/consensus/types/Cargo.toml b/consensus/types/Cargo.toml index 21a15fc517..79beb81282 100644 --- a/consensus/types/Cargo.toml +++ b/consensus/types/Cargo.toml @@ -10,56 +10,56 @@ harness = false [dependencies] alloy-primitives = { workspace = true } -merkle_proof = { workspace = true } +alloy-rlp = { version = "0.3.4", features = ["derive"] } +# The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by +# `AbstractExecPayload` +arbitrary = { workspace = true, features = ["derive"] } bls = { workspace = true, features = ["arbitrary"] } -kzg = { workspace = true } compare_fields = { workspace = true } compare_fields_derive = { workspace = true } +derivative = { workspace = true } eth2_interop_keypairs = { path = "../../common/eth2_interop_keypairs" } ethereum_hashing = { workspace = true } +ethereum_serde_utils = { workspace = true } +ethereum_ssz = { workspace = true, features = ["arbitrary"] } +ethereum_ssz_derive = { workspace = true } +fixed_bytes = { workspace = true } hex = { workspace = true } int_to_bytes = { workspace = true } +itertools = { workspace = true } +kzg 
= { workspace = true } log = { workspace = true } -rayon = { workspace = true } +maplit = { workspace = true } +merkle_proof = { workspace = true } +metastruct = "0.1.0" +milhouse = { workspace = true } +parking_lot = { workspace = true } rand = { workspace = true } +rand_xorshift = "0.3.0" +rayon = { workspace = true } +regex = { workspace = true } +rpds = { workspace = true } +rusqlite = { workspace = true } safe_arith = { workspace = true } serde = { workspace = true, features = ["rc"] } +serde_json = { workspace = true } +serde_yaml = { workspace = true } slog = { workspace = true } -ethereum_ssz = { workspace = true, features = ["arbitrary"] } -ethereum_ssz_derive = { workspace = true } +smallvec = { workspace = true } ssz_types = { workspace = true, features = ["arbitrary"] } +superstruct = { workspace = true } swap_or_not_shuffle = { workspace = true, features = ["arbitrary"] } +tempfile = { workspace = true } test_random_derive = { path = "../../common/test_random_derive" } tree_hash = { workspace = true } tree_hash_derive = { workspace = true } -rand_xorshift = "0.3.0" -serde_yaml = { workspace = true } -tempfile = { workspace = true } -derivative = { workspace = true } -rusqlite = { workspace = true } -# The arbitrary dependency is enabled by default since Capella to avoid complexity introduced by -# `AbstractExecPayload` -arbitrary = { workspace = true, features = ["derive"] } -ethereum_serde_utils = { workspace = true } -regex = { workspace = true } -parking_lot = { workspace = true } -itertools = { workspace = true } -superstruct = { workspace = true } -metastruct = "0.1.0" -serde_json = { workspace = true } -smallvec = { workspace = true } -maplit = { workspace = true } -alloy-rlp = { version = "0.3.4", features = ["derive"] } -milhouse = { workspace = true } -rpds = { workspace = true } -fixed_bytes = { workspace = true } [dev-dependencies] -criterion = { workspace = true } beacon_chain = { workspace = true } +criterion = { workspace = true } +paste 
= { workspace = true } state_processing = { workspace = true } tokio = { workspace = true } -paste = { workspace = true } [features] default = ["sqlite", "legacy-arith"] From c9747fb77f3ff1d8da765003b4988c20ce2d7cd8 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Mon, 13 Jan 2025 11:08:51 +1100 Subject: [PATCH 12/15] Refactor feature testing for spec tests (#6737) * Refactor spec testing for features and simplify usage. * Fix `SszStatic` tests for PeerDAS: exclude eip7594 test vectors when testing Electra types. * Merge branch 'unstable' into refactor-ef-tests-features --- testing/ef_tests/src/cases.rs | 36 +++++++++++++-- .../ef_tests/src/cases/get_custody_columns.rs | 9 ++++ .../src/cases/kzg_blob_to_kzg_commitment.rs | 4 -- .../src/cases/kzg_compute_blob_kzg_proof.rs | 4 -- .../cases/kzg_compute_cells_and_kzg_proofs.rs | 8 +++- .../src/cases/kzg_compute_kzg_proof.rs | 4 -- .../cases/kzg_recover_cells_and_kzg_proofs.rs | 8 +++- .../src/cases/kzg_verify_blob_kzg_proof.rs | 4 -- .../cases/kzg_verify_blob_kzg_proof_batch.rs | 4 -- .../cases/kzg_verify_cell_kzg_proof_batch.rs | 8 +++- .../src/cases/kzg_verify_kzg_proof.rs | 4 -- testing/ef_tests/src/handler.rs | 46 ++++++++++++++----- testing/ef_tests/tests/tests.rs | 20 ++++---- 13 files changed, 103 insertions(+), 56 deletions(-) diff --git a/testing/ef_tests/src/cases.rs b/testing/ef_tests/src/cases.rs index 63274ee0c0..8f5571d64a 100644 --- a/testing/ef_tests/src/cases.rs +++ b/testing/ef_tests/src/cases.rs @@ -74,11 +74,37 @@ pub use ssz_generic::*; pub use ssz_static::*; pub use transition::TransitionTest; -#[derive(Debug, PartialEq)] +/// Used for running feature tests for future forks that have not yet been added to `ForkName`. +/// This runs tests in the directory named by the feature instead of the fork name. This has been +/// the pattern used in the `consensus-spec-tests` repository: +/// `consensus-spec-tests/tests/general/[feature_name]/[runner_name].` +/// e.g. 
consensus-spec-tests/tests/general/peerdas/ssz_static +/// +/// The feature tests can be run with one of the following methods: +/// 1. `handler.run_for_feature(feature_name)` for new tests that are not on existing fork, i.e. a +/// new handler. This will be temporary and the test will need to be updated to use +/// `handle.run()` once the feature is incorporated into a fork. +/// 2. `handler.run()` for tests that are already on existing forks, but with new test vectors for +/// the feature. In this case the `handler.is_enabled_for_feature` will need to be implemented +/// to return `true` for the feature in order for the feature test vector to be tested. +#[derive(Debug, PartialEq, Clone, Copy)] pub enum FeatureName { Eip7594, } +impl FeatureName { + pub fn list_all() -> Vec { + vec![FeatureName::Eip7594] + } + + /// `ForkName` to use when running the feature tests. + pub fn fork_name(&self) -> ForkName { + match self { + FeatureName::Eip7594 => ForkName::Deneb, + } + } +} + impl Display for FeatureName { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { @@ -107,11 +133,13 @@ pub trait Case: Debug + Sync { true } - /// Whether or not this test exists for the given `feature_name`. + /// Whether or not this test exists for the given `feature_name`. This is intended to be used + /// for features that have not been added to a fork yet, and there is usually a separate folder + /// for the feature in the `consensus-spec-tests` repository. /// - /// Returns `true` by default. + /// Returns `false` by default. fn is_enabled_for_feature(_feature_name: FeatureName) -> bool { - true + false } /// Execute a test and return the result. 
diff --git a/testing/ef_tests/src/cases/get_custody_columns.rs b/testing/ef_tests/src/cases/get_custody_columns.rs index 9665f87730..71b17aeaa3 100644 --- a/testing/ef_tests/src/cases/get_custody_columns.rs +++ b/testing/ef_tests/src/cases/get_custody_columns.rs @@ -21,6 +21,14 @@ impl LoadCase for GetCustodyColumns { } impl Case for GetCustodyColumns { + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + false + } + + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 + } + fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let spec = E::default_spec(); let node_id = U256::from_str_radix(&self.node_id, 10) @@ -33,6 +41,7 @@ impl Case for GetCustodyColumns { ) .expect("should compute custody columns") .collect::>(); + let expected = &self.result; if computed == *expected { Ok(()) diff --git a/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs b/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs index fa16a5fcb7..feb9a4ff5c 100644 --- a/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs +++ b/testing/ef_tests/src/cases/kzg_blob_to_kzg_commitment.rs @@ -31,10 +31,6 @@ impl Case for KZGBlobToKZGCommitment { fork_name == ForkName::Deneb } - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name != FeatureName::Eip7594 - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let kzg = get_kzg(); let commitment = parse_blob::(&self.input.blob).and_then(|blob| { diff --git a/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs index 694013e251..4aadc37af2 100644 --- a/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_compute_blob_kzg_proof.rs @@ -32,10 +32,6 @@ impl Case for KZGComputeBlobKZGProof { fork_name == ForkName::Deneb } - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name 
!= FeatureName::Eip7594 - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGComputeBlobKZGProofInput| -> Result<_, Error> { let blob = parse_blob::(&input.blob)?; diff --git a/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs b/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs index 2a9f8ceeef..a7219f0629 100644 --- a/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs +++ b/testing/ef_tests/src/cases/kzg_compute_cells_and_kzg_proofs.rs @@ -26,8 +26,12 @@ impl LoadCase for KZGComputeCellsAndKZGProofs { } impl Case for KZGComputeCellsAndKZGProofs { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Deneb + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + false + } + + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs index 6f53038f28..4a47fe35eb 100644 --- a/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_compute_kzg_proof.rs @@ -39,10 +39,6 @@ impl Case for KZGComputeKZGProof { fork_name == ForkName::Deneb } - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name != FeatureName::Eip7594 - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGComputeKZGProofInput| -> Result<_, Error> { let blob = parse_blob::(&input.blob)?; diff --git a/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs b/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs index 10cc866fbe..b72b3a05cd 100644 --- a/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs +++ b/testing/ef_tests/src/cases/kzg_recover_cells_and_kzg_proofs.rs @@ -27,8 +27,12 @@ 
impl LoadCase for KZGRecoverCellsAndKZGProofs { } impl Case for KZGRecoverCellsAndKZGProofs { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Deneb + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + false + } + + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs index 3dc955bdcc..66f50d534b 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof.rs @@ -116,10 +116,6 @@ impl Case for KZGVerifyBlobKZGProof { fork_name == ForkName::Deneb } - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name != FeatureName::Eip7594 - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGVerifyBlobKZGProofInput| -> Result<(Blob, KzgCommitment, KzgProof), Error> { let blob = parse_blob::(&input.blob)?; diff --git a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs index 80cd0a2849..efd4158806 100644 --- a/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs +++ b/testing/ef_tests/src/cases/kzg_verify_blob_kzg_proof_batch.rs @@ -33,10 +33,6 @@ impl Case for KZGVerifyBlobKZGProofBatch { fork_name == ForkName::Deneb } - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name != FeatureName::Eip7594 - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGVerifyBlobKZGProofBatchInput| -> Result<_, Error> { let blobs = input diff --git a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs index 
5887d764ca..815ad7a5bc 100644 --- a/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs +++ b/testing/ef_tests/src/cases/kzg_verify_cell_kzg_proof_batch.rs @@ -29,8 +29,12 @@ impl LoadCase for KZGVerifyCellKZGProofBatch { } impl Case for KZGVerifyCellKZGProofBatch { - fn is_enabled_for_fork(fork_name: ForkName) -> bool { - fork_name == ForkName::Deneb + fn is_enabled_for_fork(_fork_name: ForkName) -> bool { + false + } + + fn is_enabled_for_feature(feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 } fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs b/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs index ed7583dbd0..07df05a6ac 100644 --- a/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs +++ b/testing/ef_tests/src/cases/kzg_verify_kzg_proof.rs @@ -33,10 +33,6 @@ impl Case for KZGVerifyKZGProof { fork_name == ForkName::Deneb } - fn is_enabled_for_feature(feature_name: FeatureName) -> bool { - feature_name != FeatureName::Eip7594 - } - fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let parse_input = |input: &KZGVerifyKZGProofInput| -> Result<_, Error> { let commitment = parse_commitment(&input.commitment)?; diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index e7c148645c..d8fe061061 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -7,9 +7,6 @@ use std::marker::PhantomData; use std::path::PathBuf; use types::{BeaconState, EthSpec, ForkName}; -const EIP7594_FORK: ForkName = ForkName::Deneb; -const EIP7594_TESTS: [&str; 4] = ["ssz_static", "merkle_proof", "networking", "kzg"]; - pub trait Handler { type Case: Case + LoadCase; @@ -39,13 +36,12 @@ pub trait Handler { for fork_name in ForkName::list_all() { if !self.disabled_forks().contains(&fork_name) && self.is_enabled_for_fork(fork_name) { self.run_for_fork(fork_name); + } + } - if 
fork_name == EIP7594_FORK - && EIP7594_TESTS.contains(&Self::runner_name()) - && self.is_enabled_for_feature(FeatureName::Eip7594) - { - self.run_for_feature(EIP7594_FORK, FeatureName::Eip7594); - } + for feature_name in FeatureName::list_all() { + if self.is_enabled_for_feature(feature_name) { + self.run_for_feature(feature_name); } } } @@ -96,8 +92,9 @@ pub trait Handler { crate::results::assert_tests_pass(&name, &handler_path, &results); } - fn run_for_feature(&self, fork_name: ForkName, feature_name: FeatureName) { + fn run_for_feature(&self, feature_name: FeatureName) { let feature_name_str = feature_name.to_string(); + let fork_name = feature_name.fork_name(); let handler_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) .join("consensus-spec-tests") @@ -352,6 +349,22 @@ where fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { self.supported_forks.contains(&fork_name) } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + // This ensures we only run the tests **once** for `Eip7594`, using the types matching the + // correct fork, e.g. `Eip7594` uses SSZ types from `Deneb` as of spec test version + // `v1.5.0-alpha.8`, therefore the `Eip7594` tests should get included when testing Deneb types. + // + // e.g. 
Eip7594 test vectors are executed in the first line below, but excluded in the 2nd + // line when testing the type `AttestationElectra`: + // + // ``` + // SszStaticHandler::, MainnetEthSpec>::pre_electra().run(); + // SszStaticHandler::, MainnetEthSpec>::electra_only().run(); + // ``` + feature_name == FeatureName::Eip7594 + && self.supported_forks.contains(&feature_name.fork_name()) + } } impl Handler for SszStaticTHCHandler, E> @@ -371,6 +384,10 @@ where fn handler_name(&self) -> String { BeaconState::::name().into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 + } } impl Handler for SszStaticWithSpecHandler @@ -392,6 +409,10 @@ where fn handler_name(&self) -> String { T::name().into() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 + } } #[derive(Derivative)] @@ -971,9 +992,12 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { - // Enabled in Deneb fork_name.deneb_enabled() } + + fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + feature_name == FeatureName::Eip7594 + } } #[derive(Derivative)] diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 292625a371..691d27951a 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -627,17 +627,17 @@ mod ssz_static { #[test] fn data_column_sidecar() { SszStaticHandler::, MinimalEthSpec>::deneb_only() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); SszStaticHandler::, MainnetEthSpec>::deneb_only() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); } #[test] fn data_column_identifier() { SszStaticHandler::::deneb_only() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); SszStaticHandler::::deneb_only() - .run_for_feature(ForkName::Deneb, 
FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); } #[test] @@ -902,19 +902,19 @@ fn kzg_verify_kzg_proof() { #[test] fn kzg_compute_cells_and_proofs() { KZGComputeCellsAndKZGProofHandler::::default() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); } #[test] fn kzg_verify_cell_proof_batch() { KZGVerifyCellKZGProofBatchHandler::::default() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); } #[test] fn kzg_recover_cells_and_proofs() { KZGRecoverCellsAndKZGProofHandler::::default() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + .run_for_feature(FeatureName::Eip7594); } #[test] @@ -949,8 +949,6 @@ fn rewards() { #[test] fn get_custody_columns() { - GetCustodyColumnsHandler::::default() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); - GetCustodyColumnsHandler::::default() - .run_for_feature(ForkName::Deneb, FeatureName::Eip7594); + GetCustodyColumnsHandler::::default().run_for_feature(FeatureName::Eip7594); + GetCustodyColumnsHandler::::default().run_for_feature(FeatureName::Eip7594); } From 06e4d22d4954807ddc755feaea7518ad2ce06f35 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 14 Jan 2025 10:17:00 +1100 Subject: [PATCH 13/15] Electra spec changes for `v1.5.0-beta.0` (#6731) * First pass * Add restrictions to RuntimeVariableList api * Use empty_uninitialized and fix warnings * Fix some todos * Merge branch 'unstable' into max-blobs-preset * Fix take impl on RuntimeFixedList * cleanup * Fix test compilations * Fix some more tests * Fix test from unstable * Merge branch 'unstable' into max-blobs-preset * Implement "Bugfix and more withdrawal tests" * Implement "Add missed exit checks to consolidation processing" * Implement "Update initial earliest_exit_epoch calculation" * Implement "Limit consolidating balance by validator.effective_balance" * Implement "Use 16-bit random value in validator filter" * Implement "Do not 
change creds type on consolidation" * Rename PendingPartialWithdraw index field to validator_index * Skip slots to get test to pass and add TODO * Implement "Synchronously check all transactions to have non-zero length" * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset * Remove footgun function * Minor simplifications * Move from preset to config * Fix typo * Revert "Remove footgun function" This reverts commit de01f923c7452355c87f50c0e8031ca94fa00d36. * Try fixing tests * Implement "bump minimal preset MAX_BLOB_COMMITMENTS_PER_BLOCK and KZG_COMMITMENT_INCLUSION_PROOF_DEPTH" * Thread through ChainSpec * Fix release tests * Move RuntimeFixedVector into module and rename * Add test * Implement "Remove post-altair `initialize_beacon_state_from_eth1` from specs" * Update preset YAML * Remove empty RuntimeVarList awefullness * Make max_blobs_per_block a config parameter (#6329) Squashed commit of the following: commit 04b3743ec1e0b650269dd8e58b540c02430d1c0d Author: Michael Sproul Date: Mon Jan 6 17:36:58 2025 +1100 Add test commit 440e85419940d4daba406d910e7908dd1fe78668 Author: Michael Sproul Date: Mon Jan 6 17:24:50 2025 +1100 Move RuntimeFixedVector into module and rename commit f66e179a40c3917eee39a93534ecf75480172699 Author: Michael Sproul Date: Mon Jan 6 17:17:17 2025 +1100 Fix release tests commit e4bfe71cd1f0a2784d0bd57f85b2f5d8cf503ac1 Author: Michael Sproul Date: Mon Jan 6 17:05:30 2025 +1100 Thread through ChainSpec commit 063b79c16abd3f6df47b85efcf3858177bc933b9 Author: Michael Sproul Date: Mon Jan 6 15:32:16 2025 +1100 Try fixing tests commit 88bedf09bc647de66bd1ff944bbc8fb13e2b7590 Author: Michael Sproul Date: Mon Jan 6 15:04:37 2025 +1100 Revert "Remove footgun function" This reverts commit de01f923c7452355c87f50c0e8031ca94fa00d36. 
commit 32483d385b66f252d50cee5b524e2924157bdcd4 Author: Michael Sproul Date: Mon Jan 6 15:04:32 2025 +1100 Fix typo commit 2e86585b478c012f6e3483989c87e38161227674 Author: Michael Sproul Date: Mon Jan 6 15:04:15 2025 +1100 Move from preset to config commit 1095d60a40be20dd3c229b759fc3c228b51e51e3 Author: Michael Sproul Date: Mon Jan 6 14:38:40 2025 +1100 Minor simplifications commit de01f923c7452355c87f50c0e8031ca94fa00d36 Author: Michael Sproul Date: Mon Jan 6 14:06:57 2025 +1100 Remove footgun function commit 0c2c8c42245c25b8cf17885faf20acd3b81140ec Merge: 21ecb58ff f51a292f7 Author: Michael Sproul Date: Mon Jan 6 14:02:50 2025 +1100 Merge remote-tracking branch 'origin/unstable' into max-blobs-preset commit f51a292f77575a1786af34271fb44954f141c377 Author: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Fri Jan 3 20:27:21 2025 +0100 fully lint only explicitly to avoid unnecessary rebuilds (#6753) * fully lint only explicitly to avoid unnecessary rebuilds commit 7e0cddef321c2a069582c65b58e5f46590d60c49 Author: Akihito Nakano Date: Tue Dec 24 10:38:56 2024 +0900 Make sure we have fanout peers when publish (#6738) * Ensure that `fanout_peers` is always non-empty if it's `Some` commit 21ecb58ff88b86435ab62d9ac227394c10fdcd22 Merge: 2fcb2935e 9aefb5539 Author: Pawan Dhananjay Date: Mon Oct 21 14:46:00 2024 -0700 Merge branch 'unstable' into max-blobs-preset commit 2fcb2935ec7ef4cd18bbdd8aedb7de61fac69e61 Author: Pawan Dhananjay Date: Fri Sep 6 18:28:31 2024 -0700 Fix test from unstable commit 12c6ef118a1a6d910c48d9d4b23004f3609264c7 Author: Pawan Dhananjay Date: Wed Sep 4 16:16:36 2024 -0700 Fix some more tests commit d37733b846ce58e318e976d6503ca394b4901141 Author: Pawan Dhananjay Date: Wed Sep 4 12:47:36 2024 -0700 Fix test compilations commit 52bb581e071d5f474d519366e860a4b3a0b52f78 Author: Pawan Dhananjay Date: Tue Sep 3 18:38:19 2024 -0700 cleanup commit e71020e3e613910e0315f558ead661b490a0ff20 Author: Pawan Dhananjay Date: Tue Sep 3 17:16:10 
2024 -0700 Fix take impl on RuntimeFixedList commit 13f9bba6470b2140e5c34f14aed06dab2b062c1c Merge: 60100fc6b 4e675cf5d Author: Pawan Dhananjay Date: Tue Sep 3 16:08:59 2024 -0700 Merge branch 'unstable' into max-blobs-preset commit 60100fc6be72792ff33913d7e5a53434c792aacf Author: Pawan Dhananjay Date: Fri Aug 30 16:04:11 2024 -0700 Fix some todos commit a9cb329a221a809f7dd818984753826f91c2e26b Author: Pawan Dhananjay Date: Fri Aug 30 15:54:00 2024 -0700 Use empty_uninitialized and fix warnings commit 4dc6e6515ecf75cefa4de840edc7b57e76a8fc9e Author: Pawan Dhananjay Date: Fri Aug 30 15:53:18 2024 -0700 Add restrictions to RuntimeVariableList api commit 25feedfde348b530c4fa2348cc71a06b746898ed Author: Pawan Dhananjay Date: Thu Aug 29 16:11:19 2024 -0700 First pass * Fix tests * Implement max_blobs_per_block_electra * Fix config issues * Simplify BlobSidecarListFromRoot * Disable PeerDAS tests * Merge remote-tracking branch 'origin/unstable' into max-blobs-preset * Bump quota to account for new target (6) * Remove clone * Fix issue from review * Try to remove ugliness * Merge branch 'unstable' into max-blobs-preset * Merge remote-tracking branch 'origin/unstable' into electra-alpha10 * Merge commit '04b3743ec1e0b650269dd8e58b540c02430d1c0d' into electra-alpha10 * Merge remote-tracking branch 'pawan/max-blobs-preset' into electra-alpha10 * Update tests to v1.5.0-beta.0 * Resolve merge conflicts * Linting * fmt * Fix test and add TODO * Gracefully handle slashed proposers in fork choice tests * Merge remote-tracking branch 'origin/unstable' into electra-alpha10 * Keep latest changes from max_blobs_per_block PR in codec.rs * Revert a few more regressions and add a comment * Disable more DAS tests * Improve validator monitor test a little * Make test more robust * Fix sync test that didn't understand blobs * Fill out cropped comment --- .../beacon_chain/tests/validator_monitor.rs | 82 ++++++++----------- .../src/engine_api/new_payload_request.rs | 5 ++ 
beacon_node/execution_layer/src/lib.rs | 1 + .../lighthouse_network/src/rpc/protocol.rs | 2 +- beacon_node/network/src/sync/tests/range.rs | 53 ++++++++---- consensus/fork_choice/tests/tests.rs | 61 +++++++------- .../src/per_block_processing.rs | 36 +++++--- .../process_operations.rs | 33 +++++--- .../src/per_epoch_processing/single_pass.rs | 4 +- .../state_processing/src/upgrade/electra.rs | 4 +- consensus/types/presets/gnosis/electra.yaml | 13 ++- consensus/types/presets/mainnet/altair.yaml | 2 + consensus/types/presets/mainnet/electra.yaml | 13 ++- consensus/types/presets/mainnet/phase0.yaml | 2 +- consensus/types/presets/minimal/altair.yaml | 2 + consensus/types/presets/minimal/deneb.yaml | 8 +- consensus/types/presets/minimal/electra.yaml | 15 ++-- consensus/types/presets/minimal/phase0.yaml | 6 +- consensus/types/src/beacon_state.rs | 53 ++++++++++-- consensus/types/src/chain_spec.rs | 24 +++++- consensus/types/src/eth_spec.rs | 14 ++-- .../types/src/pending_partial_withdrawal.rs | 2 +- consensus/types/src/preset.rs | 4 +- testing/ef_tests/Makefile | 2 +- testing/ef_tests/check_all_files_accessed.py | 6 ++ .../src/cases/genesis_initialization.rs | 3 +- .../ef_tests/src/cases/genesis_validity.rs | 4 + testing/ef_tests/src/handler.rs | 23 ++++-- testing/ef_tests/tests/tests.rs | 10 ++- 29 files changed, 309 insertions(+), 178 deletions(-) diff --git a/beacon_node/beacon_chain/tests/validator_monitor.rs b/beacon_node/beacon_chain/tests/validator_monitor.rs index 91de4fe270..180db6d76d 100644 --- a/beacon_node/beacon_chain/tests/validator_monitor.rs +++ b/beacon_node/beacon_chain/tests/validator_monitor.rs @@ -4,7 +4,7 @@ use beacon_chain::test_utils::{ use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS}; use logging::test_logger; use std::sync::LazyLock; -use types::{Epoch, EthSpec, ForkName, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; +use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot}; // Should 
ideally be divisible by 3. pub const VALIDATOR_COUNT: usize = 48; @@ -117,7 +117,7 @@ async fn missed_blocks_across_epochs() { } #[tokio::test] -async fn produces_missed_blocks() { +async fn missed_blocks_basic() { let validator_count = 16; let slots_per_epoch = E::slots_per_epoch(); @@ -127,13 +127,10 @@ async fn produces_missed_blocks() { // Generate 63 slots (2 epochs * 32 slots per epoch - 1) let initial_blocks = slots_per_epoch * nb_epoch_to_simulate.as_u64() - 1; - // The validator index of the validator that is 'supposed' to miss a block - let validator_index_to_monitor = 1; - // 1st scenario // // // Missed block happens when slot and prev_slot are in the same epoch - let harness1 = get_harness(validator_count, vec![validator_index_to_monitor]); + let harness1 = get_harness(validator_count, vec![]); harness1 .extend_chain( initial_blocks as usize, @@ -153,7 +150,7 @@ async fn produces_missed_blocks() { let mut prev_slot = Slot::new(idx - 1); let mut duplicate_block_root = *_state.block_roots().get(idx as usize).unwrap(); let mut validator_indexes = _state.get_beacon_proposer_indices(&harness1.spec).unwrap(); - let mut validator_index = validator_indexes[slot_in_epoch.as_usize()]; + let mut missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; let mut proposer_shuffling_decision_root = _state .proposer_shuffling_decision_root(duplicate_block_root) .unwrap(); @@ -170,7 +167,7 @@ async fn produces_missed_blocks() { beacon_proposer_cache.lock().insert( epoch, proposer_shuffling_decision_root, - validator_indexes.into_iter().collect::>(), + validator_indexes, _state.fork() ), Ok(()) @@ -187,12 +184,15 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor = harness1.chain.validator_monitor.write(); + + validator_monitor.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress()); 
validator_monitor.process_valid_state(nb_epoch_to_simulate, _state, &harness1.chain.spec); // We should have one entry in the missed blocks map assert_eq!( - validator_monitor.get_monitored_validator_missed_block_count(validator_index as u64), - 1 + validator_monitor + .get_monitored_validator_missed_block_count(missed_block_proposer as u64), + 1, ); } @@ -201,23 +201,7 @@ async fn produces_missed_blocks() { // Missed block happens when slot and prev_slot are not in the same epoch // making sure that the cache reloads when the epoch changes // in that scenario the slot that missed a block is the first slot of the epoch - // We are adding other validators to monitor as these ones will miss a block depending on - // the fork name specified when running the test as the proposer cache differs depending on - // the fork name (cf. seed) - // - // If you are adding a new fork and seeing errors, print - // `validator_indexes[slot_in_epoch.as_usize()]` and add it below. - let validator_index_to_monitor = match harness1.spec.fork_name_at_slot::(Slot::new(0)) { - ForkName::Base => 7, - ForkName::Altair => 2, - ForkName::Bellatrix => 4, - ForkName::Capella => 11, - ForkName::Deneb => 3, - ForkName::Electra => 1, - ForkName::Fulu => 6, - }; - - let harness2 = get_harness(validator_count, vec![validator_index_to_monitor]); + let harness2 = get_harness(validator_count, vec![]); let advance_slot_by = 9; harness2 .extend_chain( @@ -238,11 +222,7 @@ async fn produces_missed_blocks() { slot_in_epoch = slot % slots_per_epoch; duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap(); validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap(); - validator_index = validator_indexes[slot_in_epoch.as_usize()]; - // If you are adding a new fork and seeing errors, it means the fork seed has changed the - // validator_index. Uncomment this line, run the test again and add the resulting index to the - // list above. 
- //eprintln!("new index which needs to be added => {:?}", validator_index); + missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; let beacon_proposer_cache = harness2 .chain @@ -256,7 +236,7 @@ async fn produces_missed_blocks() { beacon_proposer_cache.lock().insert( epoch, duplicate_block_root, - validator_indexes.into_iter().collect::>(), + validator_indexes.clone(), _state2.fork() ), Ok(()) @@ -271,10 +251,12 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor2 = harness2.chain.validator_monitor.write(); + validator_monitor2.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress()); validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec); // We should have one entry in the missed blocks map assert_eq!( - validator_monitor2.get_monitored_validator_missed_block_count(validator_index as u64), + validator_monitor2 + .get_monitored_validator_missed_block_count(missed_block_proposer as u64), 1 ); @@ -282,19 +264,20 @@ async fn produces_missed_blocks() { // // A missed block happens but the validator is not monitored // it should not be flagged as a missed block - idx = initial_blocks + (advance_slot_by) - 7; + while validator_indexes[(idx % slots_per_epoch) as usize] == missed_block_proposer + && idx / slots_per_epoch == epoch.as_u64() + { + idx += 1; + } slot = Slot::new(idx); prev_slot = Slot::new(idx - 1); slot_in_epoch = slot % slots_per_epoch; duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap(); - validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap(); - let not_monitored_validator_index = validator_indexes[slot_in_epoch.as_usize()]; - // This could do with a refactor: https://github.com/sigp/lighthouse/issues/6293 - assert_ne!( - not_monitored_validator_index, - validator_index_to_monitor, - "this test has a fragile dependency on 
hardcoded indices. you need to tweak some settings or rewrite this" - ); + let second_missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; + + // This test may fail if we can't find another distinct proposer in the same epoch. + // However, this should be vanishingly unlikely: P ~= (1/16)^32 = 2e-39. + assert_ne!(missed_block_proposer, second_missed_block_proposer); assert_eq!( _state2.set_block_root(prev_slot, duplicate_block_root), @@ -306,10 +289,9 @@ async fn produces_missed_blocks() { validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec); // We shouldn't have any entry in the missed blocks map - assert_ne!(validator_index, not_monitored_validator_index); assert_eq!( validator_monitor2 - .get_monitored_validator_missed_block_count(not_monitored_validator_index as u64), + .get_monitored_validator_missed_block_count(second_missed_block_proposer as u64), 0 ); } @@ -318,7 +300,7 @@ async fn produces_missed_blocks() { // // A missed block happens at state.slot - LOG_SLOTS_PER_EPOCH // it shouldn't be flagged as a missed block - let harness3 = get_harness(validator_count, vec![validator_index_to_monitor]); + let harness3 = get_harness(validator_count, vec![]); harness3 .extend_chain( slots_per_epoch as usize, @@ -338,7 +320,7 @@ async fn produces_missed_blocks() { prev_slot = Slot::new(idx - 1); duplicate_block_root = *_state3.block_roots().get(idx as usize).unwrap(); validator_indexes = _state3.get_beacon_proposer_indices(&harness3.spec).unwrap(); - validator_index = validator_indexes[slot_in_epoch.as_usize()]; + missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()]; proposer_shuffling_decision_root = _state3 .proposer_shuffling_decision_root_at_epoch(epoch, duplicate_block_root) .unwrap(); @@ -355,7 +337,7 @@ async fn produces_missed_blocks() { beacon_proposer_cache.lock().insert( epoch, proposer_shuffling_decision_root, - validator_indexes.into_iter().collect::>(), + validator_indexes, _state3.fork() ), Ok(()) 
@@ -372,11 +354,13 @@ async fn produces_missed_blocks() { // Let's validate the state which will call the function responsible for // adding the missed blocks to the validator monitor let mut validator_monitor3 = harness3.chain.validator_monitor.write(); + validator_monitor3.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress()); validator_monitor3.process_valid_state(epoch, _state3, &harness3.chain.spec); // We shouldn't have one entry in the missed blocks map assert_eq!( - validator_monitor3.get_monitored_validator_missed_block_count(validator_index as u64), + validator_monitor3 + .get_monitored_validator_missed_block_count(missed_block_proposer as u64), 0 ); } diff --git a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs index a86b2fd9bb..23610c9ae4 100644 --- a/beacon_node/execution_layer/src/engine_api/new_payload_request.rs +++ b/beacon_node/execution_layer/src/engine_api/new_payload_request.rs @@ -128,6 +128,11 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> { let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH); + // Check that no transactions in the payload are zero length + if payload.transactions().iter().any(|slice| slice.is_empty()) { + return Err(Error::ZeroLengthTransaction); + } + let (header_hash, rlp_transactions_root) = calculate_execution_block_hash( payload, parent_beacon_block_root, diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 118d7adfca..f7abe73543 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -157,6 +157,7 @@ pub enum Error { payload: ExecutionBlockHash, transactions_root: Hash256, }, + ZeroLengthTransaction, PayloadBodiesByRangeNotSupported, InvalidJWTSecret(String), InvalidForkForPayload, diff --git a/beacon_node/lighthouse_network/src/rpc/protocol.rs b/beacon_node/lighthouse_network/src/rpc/protocol.rs 
index 681b739d59..780dff937d 100644 --- a/beacon_node/lighthouse_network/src/rpc/protocol.rs +++ b/beacon_node/lighthouse_network/src/rpc/protocol.rs @@ -710,7 +710,7 @@ pub fn rpc_blob_limits() -> RpcLimits { } } -// TODO(peerdas): fix hardcoded max here +// TODO(das): fix hardcoded max here pub fn rpc_data_column_limits(fork_name: ForkName) -> RpcLimits { RpcLimits::new( DataColumnSidecar::::empty().as_ssz_bytes().len(), diff --git a/beacon_node/network/src/sync/tests/range.rs b/beacon_node/network/src/sync/tests/range.rs index 6faa8b7247..05d5e4a414 100644 --- a/beacon_node/network/src/sync/tests/range.rs +++ b/beacon_node/network/src/sync/tests/range.rs @@ -4,12 +4,15 @@ use crate::sync::manager::SLOT_IMPORT_TOLERANCE; use crate::sync::range_sync::RangeSyncType; use crate::sync::SyncMessage; use beacon_chain::test_utils::{AttestationStrategy, BlockStrategy}; -use beacon_chain::EngineState; +use beacon_chain::{block_verification_types::RpcBlock, EngineState, NotifyExecutionLayer}; use lighthouse_network::rpc::{RequestType, StatusMessage}; use lighthouse_network::service::api_types::{AppRequestId, Id, SyncRequestId}; use lighthouse_network::{PeerId, SyncInfo}; use std::time::Duration; -use types::{EthSpec, Hash256, MinimalEthSpec as E, SignedBeaconBlock, Slot}; +use types::{ + BlobSidecarList, BlockImportSource, EthSpec, Hash256, MinimalEthSpec as E, SignedBeaconBlock, + SignedBeaconBlockHash, Slot, +}; const D: Duration = Duration::new(0, 0); @@ -154,7 +157,9 @@ impl TestRig { } } - async fn create_canonical_block(&mut self) -> SignedBeaconBlock { + async fn create_canonical_block( + &mut self, + ) -> (SignedBeaconBlock, Option>) { self.harness.advance_slot(); let block_root = self @@ -165,19 +170,39 @@ impl TestRig { AttestationStrategy::AllValidators, ) .await; - self.harness - .chain - .store - .get_full_block(&block_root) - .unwrap() - .unwrap() + // TODO(das): this does not handle data columns yet + let store = &self.harness.chain.store; + let block = 
store.get_full_block(&block_root).unwrap().unwrap(); + let blobs = if block.fork_name_unchecked().deneb_enabled() { + store.get_blobs(&block_root).unwrap().blobs() + } else { + None + }; + (block, blobs) } - async fn remember_block(&mut self, block: SignedBeaconBlock) { - self.harness - .process_block(block.slot(), block.canonical_root(), (block.into(), None)) + async fn remember_block( + &mut self, + (block, blob_sidecars): (SignedBeaconBlock, Option>), + ) { + // This code is kind of duplicated from Harness::process_block, but takes sidecars directly. + let block_root = block.canonical_root(); + self.harness.set_current_slot(block.slot()); + let _: SignedBeaconBlockHash = self + .harness + .chain + .process_block( + block_root, + RpcBlock::new(Some(block_root), block.into(), blob_sidecars).unwrap(), + NotifyExecutionLayer::Yes, + BlockImportSource::RangeSync, + || Ok(()), + ) .await + .unwrap() + .try_into() .unwrap(); + self.harness.chain.recompute_head_at_current_slot().await; } } @@ -217,9 +242,9 @@ async fn state_update_while_purging() { // Need to create blocks that can be inserted into the fork-choice and fit the "known // conditions" below. 
let head_peer_block = rig_2.create_canonical_block().await; - let head_peer_root = head_peer_block.canonical_root(); + let head_peer_root = head_peer_block.0.canonical_root(); let finalized_peer_block = rig_2.create_canonical_block().await; - let finalized_peer_root = finalized_peer_block.canonical_root(); + let finalized_peer_root = finalized_peer_block.0.canonical_root(); // Get a peer with an advanced head let head_peer = rig.add_head_peer_with_root(head_peer_root); diff --git a/consensus/fork_choice/tests/tests.rs b/consensus/fork_choice/tests/tests.rs index 001b80fe11..70b4b73d52 100644 --- a/consensus/fork_choice/tests/tests.rs +++ b/consensus/fork_choice/tests/tests.rs @@ -10,6 +10,7 @@ use beacon_chain::{ use fork_choice::{ ForkChoiceStore, InvalidAttestation, InvalidBlock, PayloadVerificationStatus, QueuedAttestation, }; +use state_processing::state_advance::complete_state_advance; use std::fmt; use std::sync::Mutex; use std::time::Duration; @@ -172,6 +173,20 @@ impl ForkChoiceTest { let validators = self.harness.get_all_validators(); loop { let slot = self.harness.get_current_slot(); + + // Skip slashed proposers, as we expect validators to get slashed in these tests. + // Presently `make_block` will panic if the proposer is slashed, so we just avoid + // calling it in this case. + complete_state_advance(&mut state, None, slot, &self.harness.spec).unwrap(); + state.build_caches(&self.harness.spec).unwrap(); + let proposer_index = state + .get_beacon_proposer_index(slot, &self.harness.chain.spec) + .unwrap(); + if state.validators().get(proposer_index).unwrap().slashed { + self.harness.advance_slot(); + continue; + } + let (block_contents, state_) = self.harness.make_block(state, slot).await; state = state_; if !predicate(block_contents.0.message(), &state) { @@ -196,17 +211,20 @@ impl ForkChoiceTest { } /// Apply `count` blocks to the chain (with attestations). 
+ /// + /// Note that in the case of slashed validators, their proposals will be skipped and the chain + /// may be advanced by *more than* `count` slots. pub async fn apply_blocks(self, count: usize) -> Self { - self.harness.advance_slot(); - self.harness - .extend_chain( - count, - BlockStrategy::OnCanonicalHead, - AttestationStrategy::AllValidators, - ) - .await; - - self + // Use `Self::apply_blocks_while` which gracefully handles slashed validators. + let mut blocks_applied = 0; + self.apply_blocks_while(|_, _| { + // Blocks are applied after the predicate is called, so continue applying the block if + // less than *or equal* to the count. + blocks_applied += 1; + blocks_applied <= count + }) + .await + .unwrap() } /// Slash a validator from the previous epoch committee. @@ -244,6 +262,7 @@ impl ForkChoiceTest { /// Apply `count` blocks to the chain (without attestations). pub async fn apply_blocks_without_new_attestations(self, count: usize) -> Self { + // This function does not gracefully handle slashed proposers, but may need to in future. self.harness.advance_slot(); self.harness .extend_chain( @@ -1226,14 +1245,6 @@ async fn progressive_balances_cache_attester_slashing() { .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .await .unwrap() - // Note: This test may fail if the shuffling used changes, right now it re-runs with - // deterministic shuffling. A shuffling change my cause the slashed proposer to propose - // again in the next epoch, which results in a block processing failure - // (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip - // the slot in this scenario rather than panic-ing. The same applies to - // `progressive_balances_cache_proposer_slashing`. 
- .apply_blocks(2) - .await .add_previous_epoch_attester_slashing() .await // expect fork choice to import blocks successfully after a previous epoch attester is @@ -1244,7 +1255,7 @@ async fn progressive_balances_cache_attester_slashing() { // expect fork choice to import another epoch of blocks successfully - the slashed // attester's balance should be excluded from the current epoch total balance in // `ProgressiveBalancesCache` as well. - .apply_blocks(MainnetEthSpec::slots_per_epoch() as usize) + .apply_blocks(E::slots_per_epoch() as usize) .await; } @@ -1257,15 +1268,7 @@ async fn progressive_balances_cache_proposer_slashing() { .apply_blocks_while(|_, state| state.finalized_checkpoint().epoch == 0) .await .unwrap() - // Note: This test may fail if the shuffling used changes, right now it re-runs with - // deterministic shuffling. A shuffling change may cause the slashed proposer to propose - // again in the next epoch, which results in a block processing failure - // (`HeaderInvalid::ProposerSlashed`). The harness should be re-worked to successfully skip - // the slot in this scenario rather than panic-ing. The same applies to - // `progressive_balances_cache_attester_slashing`. - .apply_blocks(1) - .await - .add_previous_epoch_proposer_slashing(MainnetEthSpec::slots_per_epoch()) + .add_previous_epoch_proposer_slashing(E::slots_per_epoch()) .await // expect fork choice to import blocks successfully after a previous epoch proposer is // slashed, i.e. the slashed proposer's balance is correctly excluded from @@ -1275,6 +1278,6 @@ async fn progressive_balances_cache_proposer_slashing() { // expect fork choice to import another epoch of blocks successfully - the slashed // proposer's balance should be excluded from the current epoch total balance in // `ProgressiveBalancesCache` as well. 
- .apply_blocks(MainnetEthSpec::slots_per_epoch() as usize) + .apply_blocks(E::slots_per_epoch() as usize) .await; } diff --git a/consensus/state_processing/src/per_block_processing.rs b/consensus/state_processing/src/per_block_processing.rs index 782dbe2a54..502ad25838 100644 --- a/consensus/state_processing/src/per_block_processing.rs +++ b/consensus/state_processing/src/per_block_processing.rs @@ -1,7 +1,7 @@ use crate::consensus_context::ConsensusContext; use errors::{BlockOperationError, BlockProcessingError, HeaderInvalid}; use rayon::prelude::*; -use safe_arith::{ArithError, SafeArith}; +use safe_arith::{ArithError, SafeArith, SafeArithIter}; use signature_sets::{block_proposal_signature_set, get_pubkey_from_state, randao_signature_set}; use std::borrow::Cow; use tree_hash::TreeHash; @@ -509,7 +509,7 @@ pub fn compute_timestamp_at_slot( /// Compute the next batch of withdrawals which should be included in a block. /// -/// https://github.com/ethereum/consensus-specs/blob/dev/specs/capella/beacon-chain.md#new-get_expected_withdrawals +/// https://github.com/ethereum/consensus-specs/blob/dev/specs/electra/beacon-chain.md#new-get_expected_withdrawals pub fn get_expected_withdrawals( state: &BeaconState, spec: &ChainSpec, @@ -522,9 +522,9 @@ pub fn get_expected_withdrawals( // [New in Electra:EIP7251] // Consume pending partial withdrawals - let partial_withdrawals_count = + let processed_partial_withdrawals_count = if let Ok(partial_withdrawals) = state.pending_partial_withdrawals() { - let mut partial_withdrawals_count = 0; + let mut processed_partial_withdrawals_count = 0; for withdrawal in partial_withdrawals { if withdrawal.withdrawable_epoch > epoch || withdrawals.len() == spec.max_pending_partials_per_withdrawals_sweep as usize @@ -532,8 +532,8 @@ pub fn get_expected_withdrawals( break; } - let withdrawal_balance = state.get_balance(withdrawal.index as usize)?; - let validator = state.get_validator(withdrawal.index as usize)?; + let withdrawal_balance = 
state.get_balance(withdrawal.validator_index as usize)?; + let validator = state.get_validator(withdrawal.validator_index as usize)?; let has_sufficient_effective_balance = validator.effective_balance >= spec.min_activation_balance; @@ -549,7 +549,7 @@ pub fn get_expected_withdrawals( ); withdrawals.push(Withdrawal { index: withdrawal_index, - validator_index: withdrawal.index, + validator_index: withdrawal.validator_index, address: validator .get_execution_withdrawal_address(spec) .ok_or(BeaconStateError::NonExecutionAddresWithdrawalCredential)?, @@ -557,9 +557,9 @@ pub fn get_expected_withdrawals( }); withdrawal_index.safe_add_assign(1)?; } - partial_withdrawals_count.safe_add_assign(1)?; + processed_partial_withdrawals_count.safe_add_assign(1)?; } - Some(partial_withdrawals_count) + Some(processed_partial_withdrawals_count) } else { None }; @@ -570,9 +570,19 @@ pub fn get_expected_withdrawals( ); for _ in 0..bound { let validator = state.get_validator(validator_index as usize)?; - let balance = *state.balances().get(validator_index as usize).ok_or( - BeaconStateError::BalancesOutOfBounds(validator_index as usize), - )?; + let partially_withdrawn_balance = withdrawals + .iter() + .filter_map(|withdrawal| { + (withdrawal.validator_index == validator_index).then_some(withdrawal.amount) + }) + .safe_sum()?; + let balance = state + .balances() + .get(validator_index as usize) + .ok_or(BeaconStateError::BalancesOutOfBounds( + validator_index as usize, + ))? + .safe_sub(partially_withdrawn_balance)?; if validator.is_fully_withdrawable_at(balance, epoch, spec, fork_name) { withdrawals.push(Withdrawal { index: withdrawal_index, @@ -604,7 +614,7 @@ pub fn get_expected_withdrawals( .safe_rem(state.validators().len() as u64)?; } - Ok((withdrawals.into(), partial_withdrawals_count)) + Ok((withdrawals.into(), processed_partial_withdrawals_count)) } /// Apply withdrawals to the state. 
diff --git a/consensus/state_processing/src/per_block_processing/process_operations.rs b/consensus/state_processing/src/per_block_processing/process_operations.rs index 4977f7c7e9..82dd616724 100644 --- a/consensus/state_processing/src/per_block_processing/process_operations.rs +++ b/consensus/state_processing/src/per_block_processing/process_operations.rs @@ -507,11 +507,11 @@ pub fn process_withdrawal_requests( } // Verify pubkey exists - let Some(index) = state.pubkey_cache().get(&request.validator_pubkey) else { + let Some(validator_index) = state.pubkey_cache().get(&request.validator_pubkey) else { continue; }; - let validator = state.get_validator(index)?; + let validator = state.get_validator(validator_index)?; // Verify withdrawal credentials let has_correct_credential = validator.has_execution_withdrawal_credential(spec); let is_correct_source_address = validator @@ -542,16 +542,16 @@ pub fn process_withdrawal_requests( continue; } - let pending_balance_to_withdraw = state.get_pending_balance_to_withdraw(index)?; + let pending_balance_to_withdraw = state.get_pending_balance_to_withdraw(validator_index)?; if is_full_exit_request { // Only exit validator if it has no pending withdrawals in the queue if pending_balance_to_withdraw == 0 { - initiate_validator_exit(state, index, spec)? + initiate_validator_exit(state, validator_index, spec)? } continue; } - let balance = state.get_balance(index)?; + let balance = state.get_balance(validator_index)?; let has_sufficient_effective_balance = validator.effective_balance >= spec.min_activation_balance; let has_excess_balance = balance @@ -576,7 +576,7 @@ pub fn process_withdrawal_requests( state .pending_partial_withdrawals_mut()? 
.push(PendingPartialWithdrawal { - index: index as u64, + validator_index: validator_index as u64, amount: to_withdraw, withdrawable_epoch, })?; @@ -739,8 +739,8 @@ pub fn process_consolidation_request( } let target_validator = state.get_validator(target_index)?; - // Verify the target has execution withdrawal credentials - if !target_validator.has_execution_withdrawal_credential(spec) { + // Verify the target has compounding withdrawal credentials + if !target_validator.has_compounding_withdrawal_credential(spec) { return Ok(()); } @@ -757,6 +757,18 @@ pub fn process_consolidation_request( { return Ok(()); } + // Verify the source has been active long enough + if current_epoch + < source_validator + .activation_epoch + .safe_add(spec.shard_committee_period)? + { + return Ok(()); + } + // Verify the source has no pending withdrawals in the queue + if state.get_pending_balance_to_withdraw(source_index)? > 0 { + return Ok(()); + } // Initiate source validator exit and append pending consolidation let source_exit_epoch = state @@ -772,10 +784,5 @@ pub fn process_consolidation_request( target_index: target_index as u64, })?; - let target_validator = state.get_validator(target_index)?; - // Churn any target excess active balance of target and raise its max - if target_validator.has_eth1_withdrawal_credential(spec) { - state.switch_to_compounding_validator(target_index, spec)?; - } Ok(()) } diff --git a/consensus/state_processing/src/per_epoch_processing/single_pass.rs b/consensus/state_processing/src/per_epoch_processing/single_pass.rs index 904e68e368..a4a81c8eef 100644 --- a/consensus/state_processing/src/per_epoch_processing/single_pass.rs +++ b/consensus/state_processing/src/per_epoch_processing/single_pass.rs @@ -1057,14 +1057,12 @@ fn process_pending_consolidations( } // Calculate the consolidated balance - let max_effective_balance = - source_validator.get_max_effective_balance(spec, state_ctxt.fork_name); let source_effective_balance = std::cmp::min( *state 
.balances() .get(source_index) .ok_or(BeaconStateError::UnknownValidator(source_index))?, - max_effective_balance, + source_validator.effective_balance, ); // Move active balance to target. Excess balance is withdrawable. diff --git a/consensus/state_processing/src/upgrade/electra.rs b/consensus/state_processing/src/upgrade/electra.rs index 1e64ef2897..0f32e1553d 100644 --- a/consensus/state_processing/src/upgrade/electra.rs +++ b/consensus/state_processing/src/upgrade/electra.rs @@ -14,13 +14,15 @@ pub fn upgrade_to_electra( ) -> Result<(), Error> { let epoch = pre_state.current_epoch(); + let activation_exit_epoch = spec.compute_activation_exit_epoch(epoch)?; let earliest_exit_epoch = pre_state .validators() .iter() .filter(|v| v.exit_epoch != spec.far_future_epoch) .map(|v| v.exit_epoch) .max() - .unwrap_or(epoch) + .unwrap_or(activation_exit_epoch) + .max(activation_exit_epoch) .safe_add(1)?; // The total active balance cache must be built before the consolidation churn limit diff --git a/consensus/types/presets/gnosis/electra.yaml b/consensus/types/presets/gnosis/electra.yaml index 660ed9b64c..42afbb233e 100644 --- a/consensus/types/presets/gnosis/electra.yaml +++ b/consensus/types/presets/gnosis/electra.yaml @@ -10,7 +10,7 @@ MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 # State list lengths # --------------------------------------------------------------- # `uint64(2**27)` (= 134,217,728) -PENDING_BALANCE_DEPOSITS_LIMIT: 134217728 +PENDING_DEPOSITS_LIMIT: 134217728 # `uint64(2**27)` (= 134,217,728) PENDING_PARTIAL_WITHDRAWALS_LIMIT: 134217728 # `uint64(2**18)` (= 262,144) @@ -29,12 +29,12 @@ WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 MAX_ATTESTER_SLASHINGS_ELECTRA: 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**0)` (= 1) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1 +# `uint64(2**1)` (= 2) +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # --------------------------------------------------------------- -# 2**13 (= 8192) receipts +# 
2**13 (= 8192) deposit requests MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 8192 # 2**4 (= 16) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 @@ -43,3 +43,8 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 # --------------------------------------------------------------- # 2**3 ( = 8) pending withdrawals MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 + +# Pending deposits processing +# --------------------------------------------------------------- +# 2**4 ( = 16) pending deposits +MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/presets/mainnet/altair.yaml b/consensus/types/presets/mainnet/altair.yaml index 9a17b78032..813ef72122 100644 --- a/consensus/types/presets/mainnet/altair.yaml +++ b/consensus/types/presets/mainnet/altair.yaml @@ -22,3 +22,5 @@ EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 256 # --------------------------------------------------------------- # 1 MIN_SYNC_COMMITTEE_PARTICIPANTS: 1 +# SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD (= 32 * 256) +UPDATE_TIMEOUT: 8192 diff --git a/consensus/types/presets/mainnet/electra.yaml b/consensus/types/presets/mainnet/electra.yaml index 660ed9b64c..42afbb233e 100644 --- a/consensus/types/presets/mainnet/electra.yaml +++ b/consensus/types/presets/mainnet/electra.yaml @@ -10,7 +10,7 @@ MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 # State list lengths # --------------------------------------------------------------- # `uint64(2**27)` (= 134,217,728) -PENDING_BALANCE_DEPOSITS_LIMIT: 134217728 +PENDING_DEPOSITS_LIMIT: 134217728 # `uint64(2**27)` (= 134,217,728) PENDING_PARTIAL_WITHDRAWALS_LIMIT: 134217728 # `uint64(2**18)` (= 262,144) @@ -29,12 +29,12 @@ WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 MAX_ATTESTER_SLASHINGS_ELECTRA: 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**0)` (= 1) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1 +# `uint64(2**1)` (= 2) +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # --------------------------------------------------------------- -# 2**13 (= 8192) 
 receipts +# 2**13 (= 8192) deposit requests MAX_DEPOSIT_REQUESTS_PER_PAYLOAD: 8192 # 2**4 (= 16) withdrawal requests MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 @@ -43,3 +43,8 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 16 # --------------------------------------------------------------- # 2**3 ( = 8) pending withdrawals MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 8 + +# Pending deposits processing +# --------------------------------------------------------------- +# 2**4 ( = 16) pending deposits +MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/presets/mainnet/phase0.yaml b/consensus/types/presets/mainnet/phase0.yaml index 02bc96c8cd..00133ba369 100644 --- a/consensus/types/presets/mainnet/phase0.yaml +++ b/consensus/types/presets/mainnet/phase0.yaml @@ -85,4 +85,4 @@ MAX_ATTESTATIONS: 128 # 2**4 (= 16) MAX_DEPOSITS: 16 # 2**4 (= 16) -MAX_VOLUNTARY_EXITS: 16 +MAX_VOLUNTARY_EXITS: 16 \ No newline at end of file diff --git a/consensus/types/presets/minimal/altair.yaml b/consensus/types/presets/minimal/altair.yaml index 88d78bea36..5e472c49cf 100644 --- a/consensus/types/presets/minimal/altair.yaml +++ b/consensus/types/presets/minimal/altair.yaml @@ -22,3 +22,5 @@ EPOCHS_PER_SYNC_COMMITTEE_PERIOD: 8 # --------------------------------------------------------------- # 1 MIN_SYNC_COMMITTEE_PARTICIPANTS: 1 +# SLOTS_PER_EPOCH * EPOCHS_PER_SYNC_COMMITTEE_PERIOD (= 8 * 8) +UPDATE_TIMEOUT: 64 diff --git a/consensus/types/presets/minimal/deneb.yaml b/consensus/types/presets/minimal/deneb.yaml index b1bbc4ee54..c101de3162 100644 --- a/consensus/types/presets/minimal/deneb.yaml +++ b/consensus/types/presets/minimal/deneb.yaml @@ -2,9 +2,9 @@ # Misc # --------------------------------------------------------------- -# [customized] +# `uint64(4096)` FIELD_ELEMENTS_PER_BLOB: 4096 # [customized] -MAX_BLOB_COMMITMENTS_PER_BLOCK: 16 -# [customized] `floorlog2(BLOB_KZG_COMMITMENTS_GINDEX) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 4 = 9 
-KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 9 +MAX_BLOB_COMMITMENTS_PER_BLOCK: 32 +# [customized] `floorlog2(get_generalized_index(BeaconBlockBody, 'blob_kzg_commitments')) + 1 + ceillog2(MAX_BLOB_COMMITMENTS_PER_BLOCK)` = 4 + 1 + 5 = 10 +KZG_COMMITMENT_INCLUSION_PROOF_DEPTH: 10 diff --git a/consensus/types/presets/minimal/electra.yaml b/consensus/types/presets/minimal/electra.yaml index ef1ce494d8..44e4769756 100644 --- a/consensus/types/presets/minimal/electra.yaml +++ b/consensus/types/presets/minimal/electra.yaml @@ -10,7 +10,7 @@ MAX_EFFECTIVE_BALANCE_ELECTRA: 2048000000000 # State list lengths # --------------------------------------------------------------- # `uint64(2**27)` (= 134,217,728) -PENDING_BALANCE_DEPOSITS_LIMIT: 134217728 +PENDING_DEPOSITS_LIMIT: 134217728 # [customized] `uint64(2**6)` (= 64) PENDING_PARTIAL_WITHDRAWALS_LIMIT: 64 # [customized] `uint64(2**6)` (= 64) @@ -29,8 +29,8 @@ WHISTLEBLOWER_REWARD_QUOTIENT_ELECTRA: 4096 MAX_ATTESTER_SLASHINGS_ELECTRA: 1 # `uint64(2**3)` (= 8) MAX_ATTESTATIONS_ELECTRA: 8 -# `uint64(2**0)` (= 1) -MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 1 +# `uint64(2**1)` (= 2) +MAX_CONSOLIDATION_REQUESTS_PER_PAYLOAD: 2 # Execution # --------------------------------------------------------------- @@ -41,5 +41,10 @@ MAX_WITHDRAWAL_REQUESTS_PER_PAYLOAD: 2 # Withdrawals processing # --------------------------------------------------------------- -# 2**0 ( = 1) pending withdrawals -MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 1 +# 2**1 ( = 2) pending withdrawals +MAX_PENDING_PARTIALS_PER_WITHDRAWALS_SWEEP: 2 + +# Pending deposits processing +# --------------------------------------------------------------- +# 2**4 ( = 16) pending deposits +MAX_PENDING_DEPOSITS_PER_EPOCH: 16 diff --git a/consensus/types/presets/minimal/phase0.yaml b/consensus/types/presets/minimal/phase0.yaml index 1f75603142..d9a6a2b6c0 100644 --- a/consensus/types/presets/minimal/phase0.yaml +++ b/consensus/types/presets/minimal/phase0.yaml @@ -4,11 +4,11 @@ # 
--------------------------------------------------------------- # [customized] Just 4 committees for slot for testing purposes MAX_COMMITTEES_PER_SLOT: 4 -# [customized] unsecure, but fast +# [customized] insecure, but fast TARGET_COMMITTEE_SIZE: 4 # 2**11 (= 2,048) MAX_VALIDATORS_PER_COMMITTEE: 2048 -# [customized] Faster, but unsecure. +# [customized] Faster, but insecure. SHUFFLE_ROUND_COUNT: 10 # 4 HYSTERESIS_QUOTIENT: 4 @@ -85,4 +85,4 @@ MAX_ATTESTATIONS: 128 # 2**4 (= 16) MAX_DEPOSITS: 16 # 2**4 (= 16) -MAX_VOLUNTARY_EXITS: 16 +MAX_VOLUNTARY_EXITS: 16 \ No newline at end of file diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index de6077bf94..6f44998cdf 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -46,6 +46,7 @@ mod tests; pub const CACHED_EPOCHS: usize = 3; const MAX_RANDOM_BYTE: u64 = (1 << 8) - 1; +const MAX_RANDOM_VALUE: u64 = (1 << 16) - 1; pub type Validators = List::ValidatorRegistryLimit>; pub type Balances = List::ValidatorRegistryLimit>; @@ -916,6 +917,11 @@ impl BeaconState { } let max_effective_balance = spec.max_effective_balance_for_fork(self.fork_name_unchecked()); + let max_random_value = if self.fork_name_unchecked().electra_enabled() { + MAX_RANDOM_VALUE + } else { + MAX_RANDOM_BYTE + }; let mut i = 0; loop { @@ -929,10 +935,10 @@ impl BeaconState { let candidate_index = *indices .get(shuffled_index) .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; - let random_byte = Self::shuffling_random_byte(i, seed)?; + let random_value = self.shuffling_random_value(i, seed)?; let effective_balance = self.get_effective_balance(candidate_index)?; - if effective_balance.safe_mul(MAX_RANDOM_BYTE)? - >= max_effective_balance.safe_mul(u64::from(random_byte))? + if effective_balance.safe_mul(max_random_value)? + >= max_effective_balance.safe_mul(random_value)? 
{ return Ok(candidate_index); } @@ -940,6 +946,19 @@ impl BeaconState { } } + /// Fork-aware abstraction for the shuffling. + /// + /// In Electra and later, the random value is a 16-bit integer stored in a `u64`. + /// + /// Prior to Electra, the random value is an 8-bit integer stored in a `u64`. + fn shuffling_random_value(&self, i: usize, seed: &[u8]) -> Result { + if self.fork_name_unchecked().electra_enabled() { + Self::shuffling_random_u16_electra(i, seed).map(u64::from) + } else { + Self::shuffling_random_byte(i, seed).map(u64::from) + } + } + /// Get a random byte from the given `seed`. /// /// Used by the proposer & sync committee selection functions. @@ -953,6 +972,21 @@ impl BeaconState { .ok_or(Error::ShuffleIndexOutOfBounds(index)) } + /// Get two random bytes from the given `seed`. + /// + /// This is used in place of `shuffling_random_byte` from Electra onwards. + fn shuffling_random_u16_electra(i: usize, seed: &[u8]) -> Result { + let mut preimage = seed.to_vec(); + preimage.append(&mut int_to_bytes8(i.safe_div(16)? as u64)); + let offset = i.safe_rem(16)?.safe_mul(2)?; + hash(&preimage) + .get(offset..offset.safe_add(2)?) + .ok_or(Error::ShuffleIndexOutOfBounds(offset))? + .try_into() + .map(u16::from_le_bytes) + .map_err(|_| Error::ShuffleIndexOutOfBounds(offset)) + } + /// Convenience accessor for the `execution_payload_header` as an `ExecutionPayloadHeaderRef`. 
pub fn latest_execution_payload_header(&self) -> Result, Error> { match self { @@ -1120,6 +1154,11 @@ impl BeaconState { let seed = self.get_seed(epoch, Domain::SyncCommittee, spec)?; let max_effective_balance = spec.max_effective_balance_for_fork(self.fork_name_unchecked()); + let max_random_value = if self.fork_name_unchecked().electra_enabled() { + MAX_RANDOM_VALUE + } else { + MAX_RANDOM_BYTE + }; let mut i = 0; let mut sync_committee_indices = Vec::with_capacity(E::SyncCommitteeSize::to_usize()); @@ -1134,10 +1173,10 @@ impl BeaconState { let candidate_index = *active_validator_indices .get(shuffled_index) .ok_or(Error::ShuffleIndexOutOfBounds(shuffled_index))?; - let random_byte = Self::shuffling_random_byte(i, seed.as_slice())?; + let random_value = self.shuffling_random_value(i, seed.as_slice())?; let effective_balance = self.get_validator(candidate_index)?.effective_balance; - if effective_balance.safe_mul(MAX_RANDOM_BYTE)? - >= max_effective_balance.safe_mul(u64::from(random_byte))? + if effective_balance.safe_mul(max_random_value)? + >= max_effective_balance.safe_mul(random_value)? { sync_committee_indices.push(candidate_index); } @@ -2205,7 +2244,7 @@ impl BeaconState { for withdrawal in self .pending_partial_withdrawals()? 
.iter() - .filter(|withdrawal| withdrawal.index as usize == validator_index) + .filter(|withdrawal| withdrawal.validator_index as usize == validator_index) { pending_balance.safe_add_assign(withdrawal.amount)?; } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index 65f4c37aa1..ea4d8641f6 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -191,6 +191,7 @@ pub struct ChainSpec { pub max_pending_partials_per_withdrawals_sweep: u64, pub min_per_epoch_churn_limit_electra: u64, pub max_per_epoch_activation_exit_churn_limit: u64, + pub max_blobs_per_block_electra: u64, /* * Fulu hard fork params @@ -623,9 +624,12 @@ impl ChainSpec { } /// Return the value of `MAX_BLOBS_PER_BLOCK` appropriate for `fork`. - pub fn max_blobs_per_block_by_fork(&self, _fork_name: ForkName) -> u64 { - // TODO(electra): add Electra blobs per block change here - self.max_blobs_per_block + pub fn max_blobs_per_block_by_fork(&self, fork_name: ForkName) -> u64 { + if fork_name.electra_enabled() { + self.max_blobs_per_block_electra + } else { + self.max_blobs_per_block + } } pub fn data_columns_per_subnet(&self) -> usize { @@ -826,6 +830,7 @@ impl ChainSpec { u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) }) .expect("calculation does not overflow"), + max_blobs_per_block_electra: default_max_blobs_per_block_electra(), /* * Fulu hard fork params @@ -940,7 +945,7 @@ impl ChainSpec { // Electra electra_fork_version: [0x05, 0x00, 0x00, 0x01], electra_fork_epoch: None, - max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 0) + max_pending_partials_per_withdrawals_sweep: u64::checked_pow(2, 1) .expect("pow does not overflow"), min_per_epoch_churn_limit_electra: option_wrapper(|| { u64::checked_pow(2, 6)?.checked_mul(u64::checked_pow(10, 9)?) @@ -1156,6 +1161,7 @@ impl ChainSpec { u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) 
}) .expect("calculation does not overflow"), + max_blobs_per_block_electra: default_max_blobs_per_block_electra(), /* * Fulu hard fork params @@ -1412,6 +1418,9 @@ pub struct Config { #[serde(default = "default_max_per_epoch_activation_exit_churn_limit")] #[serde(with = "serde_utils::quoted_u64")] max_per_epoch_activation_exit_churn_limit: u64, + #[serde(default = "default_max_blobs_per_block_electra")] + #[serde(with = "serde_utils::quoted_u64")] + max_blobs_per_block_electra: u64, #[serde(default = "default_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] @@ -1554,6 +1563,10 @@ const fn default_max_per_epoch_activation_exit_churn_limit() -> u64 { 256_000_000_000 } +const fn default_max_blobs_per_block_electra() -> u64 { + 9 +} + const fn default_attestation_propagation_slot_range() -> u64 { 32 } @@ -1773,6 +1786,7 @@ impl Config { min_per_epoch_churn_limit_electra: spec.min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit: spec .max_per_epoch_activation_exit_churn_limit, + max_blobs_per_block_electra: spec.max_blobs_per_block_electra, custody_requirement: spec.custody_requirement, data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, @@ -1850,6 +1864,7 @@ impl Config { min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, + max_blobs_per_block_electra, custody_requirement, data_column_sidecar_subnet_count, number_of_columns, @@ -1919,6 +1934,7 @@ impl Config { min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, + max_blobs_per_block_electra, // We need to re-derive any values that might have changed in the config. 
max_blocks_by_root_request: max_blocks_by_root_request_common(max_request_blocks), diff --git a/consensus/types/src/eth_spec.rs b/consensus/types/src/eth_spec.rs index 976766dfa9..0bc074072f 100644 --- a/consensus/types/src/eth_spec.rs +++ b/consensus/types/src/eth_spec.rs @@ -3,10 +3,10 @@ use crate::*; use safe_arith::SafeArith; use serde::{Deserialize, Serialize}; use ssz_types::typenum::{ - bit::B0, UInt, U0, U1, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, U134217728, - U16, U16777216, U2, U2048, U256, U262144, U32, U4, U4096, U512, U625, U64, U65536, U8, U8192, + bit::B0, UInt, U0, U1, U10, U1024, U1048576, U1073741824, U1099511627776, U128, U131072, + U134217728, U16, U16777216, U17, U2, U2048, U256, U262144, U32, U4, U4096, U512, U625, U64, + U65536, U8, U8192, }; -use ssz_types::typenum::{U17, U9}; use std::fmt::{self, Debug}; use std::str::FromStr; @@ -431,7 +431,7 @@ impl EthSpec for MainnetEthSpec { type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; - type MaxConsolidationRequestsPerPayload = U1; + type MaxConsolidationRequestsPerPayload = U2; type MaxDepositRequestsPerPayload = U8192; type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; @@ -466,8 +466,8 @@ impl EthSpec for MinimalEthSpec { type MaxWithdrawalsPerPayload = U4; type FieldElementsPerBlob = U4096; type BytesPerBlob = U131072; - type MaxBlobCommitmentsPerBlock = U16; - type KzgCommitmentInclusionProofDepth = U9; + type MaxBlobCommitmentsPerBlock = U32; + type KzgCommitmentInclusionProofDepth = U10; type PendingPartialWithdrawalsLimit = U64; type PendingConsolidationsLimit = U64; type MaxDepositRequestsPerPayload = U4; @@ -558,7 +558,7 @@ impl EthSpec for GnosisEthSpec { type PendingDepositsLimit = U134217728; type PendingPartialWithdrawalsLimit = U134217728; type PendingConsolidationsLimit = U262144; - type MaxConsolidationRequestsPerPayload = U1; + type 
MaxConsolidationRequestsPerPayload = U2; type MaxDepositRequestsPerPayload = U8192; type MaxAttesterSlashingsElectra = U1; type MaxAttestationsElectra = U8; diff --git a/consensus/types/src/pending_partial_withdrawal.rs b/consensus/types/src/pending_partial_withdrawal.rs index e5ace7b273..846dd97360 100644 --- a/consensus/types/src/pending_partial_withdrawal.rs +++ b/consensus/types/src/pending_partial_withdrawal.rs @@ -21,7 +21,7 @@ use tree_hash_derive::TreeHash; )] pub struct PendingPartialWithdrawal { #[serde(with = "serde_utils::quoted_u64")] - pub index: u64, + pub validator_index: u64, #[serde(with = "serde_utils::quoted_u64")] pub amount: u64, pub withdrawable_epoch: Epoch, diff --git a/consensus/types/src/preset.rs b/consensus/types/src/preset.rs index f64b7051e5..9a9915e458 100644 --- a/consensus/types/src/preset.rs +++ b/consensus/types/src/preset.rs @@ -234,7 +234,7 @@ pub struct ElectraPreset { #[serde(with = "serde_utils::quoted_u64")] pub max_pending_partials_per_withdrawals_sweep: u64, #[serde(with = "serde_utils::quoted_u64")] - pub pending_balance_deposits_limit: u64, + pub pending_deposits_limit: u64, #[serde(with = "serde_utils::quoted_u64")] pub pending_partial_withdrawals_limit: u64, #[serde(with = "serde_utils::quoted_u64")] @@ -260,7 +260,7 @@ impl ElectraPreset { whistleblower_reward_quotient_electra: spec.whistleblower_reward_quotient_electra, max_pending_partials_per_withdrawals_sweep: spec .max_pending_partials_per_withdrawals_sweep, - pending_balance_deposits_limit: E::pending_deposits_limit() as u64, + pending_deposits_limit: E::pending_deposits_limit() as u64, pending_partial_withdrawals_limit: E::pending_partial_withdrawals_limit() as u64, pending_consolidations_limit: E::pending_consolidations_limit() as u64, max_consolidation_requests_per_payload: E::max_consolidation_requests_per_payload() diff --git a/testing/ef_tests/Makefile b/testing/ef_tests/Makefile index d5f4997bb7..7108e3e8f6 100644 --- a/testing/ef_tests/Makefile +++ 
b/testing/ef_tests/Makefile @@ -1,4 +1,4 @@ -TESTS_TAG := v1.5.0-alpha.8 +TESTS_TAG := v1.5.0-beta.0 TESTS = general minimal mainnet TARBALLS = $(patsubst %,%-$(TESTS_TAG).tar.gz,$(TESTS)) diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index dacca204c1..bf9e5d6cfa 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -35,6 +35,8 @@ "tests/.*/.*/ssz_static/LightClientStore", # LightClientSnapshot "tests/.*/.*/ssz_static/LightClientSnapshot", + # LightClientDataCollection + "tests/minimal/.*/light_client/data_collection", # One of the EF researchers likes to pack the tarballs on a Mac ".*\\.DS_Store.*", # More Mac weirdness. @@ -48,6 +50,10 @@ "tests/.*/eip6110", "tests/.*/whisk", "tests/.*/eip7594", + # Fulu tests are not yet being run + "tests/.*/fulu", + # TODO(electra): SingleAttestation tests are waiting on Eitan's PR + "tests/.*/electra/ssz_static/SingleAttestation" ] diff --git a/testing/ef_tests/src/cases/genesis_initialization.rs b/testing/ef_tests/src/cases/genesis_initialization.rs index 11402c75e6..210e18f781 100644 --- a/testing/ef_tests/src/cases/genesis_initialization.rs +++ b/testing/ef_tests/src/cases/genesis_initialization.rs @@ -66,8 +66,7 @@ impl LoadCase for GenesisInitialization { impl Case for GenesisInitialization { fn is_enabled_for_fork(fork_name: ForkName) -> bool { - // Altair genesis and later requires real crypto. 
- fork_name == ForkName::Base || cfg!(not(feature = "fake_crypto")) + fork_name == ForkName::Base } fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { diff --git a/testing/ef_tests/src/cases/genesis_validity.rs b/testing/ef_tests/src/cases/genesis_validity.rs index e977fa3d63..8fb9f2fbdc 100644 --- a/testing/ef_tests/src/cases/genesis_validity.rs +++ b/testing/ef_tests/src/cases/genesis_validity.rs @@ -39,6 +39,10 @@ impl LoadCase for GenesisValidity { } impl Case for GenesisValidity { + fn is_enabled_for_fork(fork_name: ForkName) -> bool { + fork_name == ForkName::Base + } + fn result(&self, _case_index: usize, fork_name: ForkName) -> Result<(), Error> { let spec = &testing_spec::(fork_name); diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index d8fe061061..2e49b1301d 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -350,7 +350,7 @@ where self.supported_forks.contains(&fork_name) } - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { + fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { // This ensures we only run the tests **once** for `Eip7594`, using the types matching the // correct fork, e.g. `Eip7594` uses SSZ types from `Deneb` as of spec test version // `v1.5.0-alpha.8`, therefore the `Eip7594` tests should get included when testing Deneb types. 
@@ -362,8 +362,11 @@ where // SszStaticHandler::, MainnetEthSpec>::pre_electra().run(); // SszStaticHandler::, MainnetEthSpec>::electra_only().run(); // ``` + /* TODO(das): re-enable feature_name == FeatureName::Eip7594 && self.supported_forks.contains(&feature_name.fork_name()) + */ + false } } @@ -385,8 +388,10 @@ where BeaconState::::name().into() } - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Eip7594 + fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { + // TODO(das): re-enable + // feature_name == FeatureName::Eip7594 + false } } @@ -410,8 +415,10 @@ where T::name().into() } - fn is_enabled_for_feature(&self, feature_name: FeatureName) -> bool { - feature_name == FeatureName::Eip7594 + fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { + // TODO(das): re-enable + // feature_name == FeatureName::Eip7594 + false } } @@ -995,8 +1002,10 @@ impl Handler for KzgInclusionMerkleProofValidityHandler bool { - feature_name == FeatureName::Eip7594 + fn is_enabled_for_feature(&self, _feature_name: FeatureName) -> bool { + // TODO(das): re-enable this + // feature_name == FeatureName::Eip7594 + false } } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 691d27951a..7c268123fa 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -237,9 +237,7 @@ macro_rules! 
ssz_static_test_no_run { #[cfg(feature = "fake_crypto")] mod ssz_static { - use ef_tests::{ - FeatureName, Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler, - }; + use ef_tests::{Handler, SszStaticHandler, SszStaticTHCHandler, SszStaticWithSpecHandler}; use types::historical_summary::HistoricalSummary; use types::{ AttesterSlashingBase, AttesterSlashingElectra, ConsolidationRequest, DepositRequest, @@ -624,6 +622,7 @@ mod ssz_static { SszStaticHandler::::capella_and_later().run(); } + /* FIXME(das): re-enable #[test] fn data_column_sidecar() { SszStaticHandler::, MinimalEthSpec>::deneb_only() @@ -639,6 +638,7 @@ mod ssz_static { SszStaticHandler::::deneb_only() .run_for_feature(FeatureName::Eip7594); } + */ #[test] fn consolidation() { @@ -899,6 +899,7 @@ fn kzg_verify_kzg_proof() { KZGVerifyKZGProofHandler::::default().run(); } +/* FIXME(das): re-enable these tests #[test] fn kzg_compute_cells_and_proofs() { KZGComputeCellsAndKZGProofHandler::::default() @@ -916,6 +917,7 @@ fn kzg_recover_cells_and_proofs() { KZGRecoverCellsAndKZGProofHandler::::default() .run_for_feature(FeatureName::Eip7594); } +*/ #[test] fn beacon_state_merkle_proof_validity() { @@ -947,8 +949,10 @@ fn rewards() { } } +/* FIXME(das): re-enable these tests #[test] fn get_custody_columns() { GetCustodyColumnsHandler::::default().run_for_feature(FeatureName::Eip7594); GetCustodyColumnsHandler::::default().run_for_feature(FeatureName::Eip7594); } +*/ From 93f9c2c7183b6b59cbb02355e2379490fee81d94 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 14 Jan 2025 07:36:27 +0530 Subject: [PATCH 14/15] Execution requests with prefix (#6801) * Exclude empty requests and add back prefix * cleanup * fix after rebase --- .../src/engine_api/json_structures.rs | 87 +++++++++++++------ consensus/types/src/execution_requests.rs | 45 +++++++--- consensus/types/src/lib.rs | 2 +- 3 files changed, 98 insertions(+), 36 deletions(-) diff --git 
a/beacon_node/execution_layer/src/engine_api/json_structures.rs b/beacon_node/execution_layer/src/engine_api/json_structures.rs index 86acaaaf3b..95b4b50925 100644 --- a/beacon_node/execution_layer/src/engine_api/json_structures.rs +++ b/beacon_node/execution_layer/src/engine_api/json_structures.rs @@ -7,7 +7,7 @@ use superstruct::superstruct; use types::beacon_block_body::KzgCommitments; use types::blob_sidecar::BlobsList; use types::execution_requests::{ - ConsolidationRequests, DepositRequests, RequestPrefix, WithdrawalRequests, + ConsolidationRequests, DepositRequests, RequestType, WithdrawalRequests, }; use types::{Blob, FixedVector, KzgProof, Unsigned}; @@ -401,47 +401,80 @@ impl From> for ExecutionPayload { } } +#[derive(Debug, Clone)] +pub enum RequestsError { + InvalidHex(hex::FromHexError), + EmptyRequest(usize), + InvalidOrdering, + InvalidPrefix(u8), + DecodeError(String), +} + /// Format of `ExecutionRequests` received over the engine api. /// -/// Array of ssz-encoded requests list encoded as hex bytes. -/// The prefix of the request type is used to index into the array. -/// -/// For e.g. 
[0xab, 0xcd, 0xef] -/// Here, 0xab are the deposits bytes (prefix and index == 0) -/// 0xcd are the withdrawals bytes (prefix and index == 1) -/// 0xef are the consolidations bytes (prefix and index == 2) +/// Array of ssz-encoded requests list encoded as hex bytes prefixed +/// with a `RequestType` #[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] #[serde(transparent)] pub struct JsonExecutionRequests(pub Vec); impl TryFrom for ExecutionRequests { - type Error = String; + type Error = RequestsError; fn try_from(value: JsonExecutionRequests) -> Result { let mut requests = ExecutionRequests::default(); - + let mut prev_prefix: Option = None; for (i, request) in value.0.into_iter().enumerate() { // hex string let decoded_bytes = hex::decode(request.strip_prefix("0x").unwrap_or(&request)) - .map_err(|e| format!("Invalid hex {:?}", e))?; - match RequestPrefix::from_prefix(i as u8) { - Some(RequestPrefix::Deposit) => { - requests.deposits = DepositRequests::::from_ssz_bytes(&decoded_bytes) - .map_err(|e| format!("Failed to decode DepositRequest from EL: {:?}", e))?; + .map_err(RequestsError::InvalidHex)?; + + // The first byte of each element is the `request_type` and the remaining bytes are the `request_data`. + // Elements with empty `request_data` **MUST** be excluded from the list. 
+ let Some((prefix_byte, request_bytes)) = decoded_bytes.split_first() else { + return Err(RequestsError::EmptyRequest(i)); + }; + if request_bytes.is_empty() { + return Err(RequestsError::EmptyRequest(i)); + } + // Elements of the list **MUST** be ordered by `request_type` in ascending order + let current_prefix = RequestType::from_u8(*prefix_byte) + .ok_or(RequestsError::InvalidPrefix(*prefix_byte))?; + if let Some(prev) = prev_prefix { + if prev.to_u8() >= current_prefix.to_u8() { + return Err(RequestsError::InvalidOrdering); } - Some(RequestPrefix::Withdrawal) => { - requests.withdrawals = WithdrawalRequests::::from_ssz_bytes(&decoded_bytes) + } + prev_prefix = Some(current_prefix); + + match current_prefix { + RequestType::Deposit => { + requests.deposits = DepositRequests::::from_ssz_bytes(request_bytes) .map_err(|e| { - format!("Failed to decode WithdrawalRequest from EL: {:?}", e) + RequestsError::DecodeError(format!( + "Failed to decode DepositRequest from EL: {:?}", + e + )) })?; } - Some(RequestPrefix::Consolidation) => { + RequestType::Withdrawal => { + requests.withdrawals = WithdrawalRequests::::from_ssz_bytes(request_bytes) + .map_err(|e| { + RequestsError::DecodeError(format!( + "Failed to decode WithdrawalRequest from EL: {:?}", + e + )) + })?; + } + RequestType::Consolidation => { requests.consolidations = - ConsolidationRequests::::from_ssz_bytes(&decoded_bytes).map_err( - |e| format!("Failed to decode ConsolidationRequest from EL: {:?}", e), - )?; + ConsolidationRequests::::from_ssz_bytes(request_bytes).map_err(|e| { + RequestsError::DecodeError(format!( + "Failed to decode ConsolidationRequest from EL: {:?}", + e + )) + })?; } - None => return Err("Empty requests string".to_string()), } } Ok(requests) @@ -510,7 +543,9 @@ impl TryFrom> for GetPayloadResponse { block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, - requests: 
response.execution_requests.try_into()?, + requests: response.execution_requests.try_into().map_err(|e| { + format!("Failed to convert json to execution requests : {:?}", e) + })?, })) } JsonGetPayloadResponse::V5(response) => { @@ -519,7 +554,9 @@ impl TryFrom> for GetPayloadResponse { block_value: response.block_value, blobs_bundle: response.blobs_bundle.into(), should_override_builder: response.should_override_builder, - requests: response.execution_requests.try_into()?, + requests: response.execution_requests.try_into().map_err(|e| { + format!("Failed to convert json to execution requests {:?}", e) + })?, })) } } diff --git a/consensus/types/src/execution_requests.rs b/consensus/types/src/execution_requests.rs index 96a3905420..223c6444cc 100644 --- a/consensus/types/src/execution_requests.rs +++ b/consensus/types/src/execution_requests.rs @@ -43,10 +43,29 @@ impl ExecutionRequests { /// Returns the encoding according to EIP-7685 to send /// to the execution layer over the engine api. pub fn get_execution_requests_list(&self) -> Vec { - let deposit_bytes = Bytes::from(self.deposits.as_ssz_bytes()); - let withdrawal_bytes = Bytes::from(self.withdrawals.as_ssz_bytes()); - let consolidation_bytes = Bytes::from(self.consolidations.as_ssz_bytes()); - vec![deposit_bytes, withdrawal_bytes, consolidation_bytes] + let mut requests_list = Vec::new(); + if !self.deposits.is_empty() { + requests_list.push(Bytes::from_iter( + [RequestType::Deposit.to_u8()] + .into_iter() + .chain(self.deposits.as_ssz_bytes()), + )); + } + if !self.withdrawals.is_empty() { + requests_list.push(Bytes::from_iter( + [RequestType::Withdrawal.to_u8()] + .into_iter() + .chain(self.withdrawals.as_ssz_bytes()), + )); + } + if !self.consolidations.is_empty() { + requests_list.push(Bytes::from_iter( + [RequestType::Consolidation.to_u8()] + .into_iter() + .chain(self.consolidations.as_ssz_bytes()), + )); + } + requests_list } /// Generate the execution layer `requests_hash` based on EIP-7685. 
@@ -55,9 +74,8 @@ impl ExecutionRequests { pub fn requests_hash(&self) -> Hash256 { let mut hasher = DynamicContext::new(); - for (i, request) in self.get_execution_requests_list().iter().enumerate() { + for request in self.get_execution_requests_list().iter() { let mut request_hasher = DynamicContext::new(); - request_hasher.update(&[i as u8]); request_hasher.update(request); let request_hash = request_hasher.finalize(); @@ -68,16 +86,16 @@ impl ExecutionRequests { } } -/// This is used to index into the `execution_requests` array. +/// The prefix types for `ExecutionRequest` objects. #[derive(Debug, Copy, Clone)] -pub enum RequestPrefix { +pub enum RequestType { Deposit, Withdrawal, Consolidation, } -impl RequestPrefix { - pub fn from_prefix(prefix: u8) -> Option { +impl RequestType { + pub fn from_u8(prefix: u8) -> Option { match prefix { 0 => Some(Self::Deposit), 1 => Some(Self::Withdrawal), @@ -85,6 +103,13 @@ impl RequestPrefix { _ => None, } } + pub fn to_u8(&self) -> u8 { + match self { + Self::Deposit => 0, + Self::Withdrawal => 1, + Self::Consolidation => 2, + } + } } #[cfg(test)] diff --git a/consensus/types/src/lib.rs b/consensus/types/src/lib.rs index 54d8bf51b6..76e414b2f1 100644 --- a/consensus/types/src/lib.rs +++ b/consensus/types/src/lib.rs @@ -172,7 +172,7 @@ pub use crate::execution_payload_header::{ ExecutionPayloadHeaderDeneb, ExecutionPayloadHeaderElectra, ExecutionPayloadHeaderFulu, ExecutionPayloadHeaderRef, ExecutionPayloadHeaderRefMut, }; -pub use crate::execution_requests::{ExecutionRequests, RequestPrefix}; +pub use crate::execution_requests::{ExecutionRequests, RequestType}; pub use crate::fork::Fork; pub use crate::fork_context::ForkContext; pub use crate::fork_data::ForkData; From 587c3e2b8c293787b1b5a7d3b38628a8e4488c49 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Tue, 14 Jan 2025 11:52:30 +0530 Subject: [PATCH 15/15] Implement changes for EIP 7691 (#6803) * Add new config options * Use electra_enabled --- 
.../lighthouse_network/src/service/mod.rs | 13 ++--- .../lighthouse_network/src/service/utils.rs | 15 +++--- .../lighthouse_network/src/types/mod.rs | 2 +- .../lighthouse_network/src/types/topics.rs | 31 +++++++---- consensus/types/src/chain_spec.rs | 52 +++++++++++++++++-- 5 files changed, 85 insertions(+), 28 deletions(-) diff --git a/beacon_node/lighthouse_network/src/service/mod.rs b/beacon_node/lighthouse_network/src/service/mod.rs index afcbfce173..999803b8fe 100644 --- a/beacon_node/lighthouse_network/src/service/mod.rs +++ b/beacon_node/lighthouse_network/src/service/mod.rs @@ -16,7 +16,7 @@ use crate::rpc::{ use crate::types::{ attestation_sync_committee_topics, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, SnappyTransform, Subnet, SubnetDiscovery, ALTAIR_CORE_TOPICS, - BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, + BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, }; use crate::EnrExt; use crate::Eth2Enr; @@ -285,26 +285,23 @@ impl Network { let max_topics = ctx.chain_spec.attestation_subnet_count as usize + SYNC_COMMITTEE_SUBNET_COUNT as usize - + ctx.chain_spec.blob_sidecar_subnet_count as usize + + ctx.chain_spec.blob_sidecar_subnet_count_electra as usize + ctx.chain_spec.data_column_sidecar_subnet_count as usize + BASE_CORE_TOPICS.len() + ALTAIR_CORE_TOPICS.len() - + CAPELLA_CORE_TOPICS.len() - + DENEB_CORE_TOPICS.len() + + CAPELLA_CORE_TOPICS.len() // 0 core deneb and electra topics + LIGHT_CLIENT_GOSSIP_TOPICS.len(); let possible_fork_digests = ctx.fork_context.all_fork_digests(); let filter = gossipsub::MaxCountSubscriptionFilter { filter: utils::create_whitelist_filter( possible_fork_digests, - ctx.chain_spec.attestation_subnet_count, + &ctx.chain_spec, SYNC_COMMITTEE_SUBNET_COUNT, - ctx.chain_spec.blob_sidecar_subnet_count, - ctx.chain_spec.data_column_sidecar_subnet_count, ), // during a fork we subscribe to both the old and new topics 
max_subscribed_topics: max_topics * 4, - // 418 in theory = (64 attestation + 4 sync committee + 7 core topics + 6 blob topics + 128 column topics) * 2 + // 424 in theory = (64 attestation + 4 sync committee + 7 core topics + 9 blob topics + 128 column topics) * 2 max_subscriptions_per_request: max_topics * 2, }; diff --git a/beacon_node/lighthouse_network/src/service/utils.rs b/beacon_node/lighthouse_network/src/service/utils.rs index 490928c08c..a9eaa002ff 100644 --- a/beacon_node/lighthouse_network/src/service/utils.rs +++ b/beacon_node/lighthouse_network/src/service/utils.rs @@ -236,10 +236,8 @@ pub fn load_or_build_metadata( /// possible fork digests. pub(crate) fn create_whitelist_filter( possible_fork_digests: Vec<[u8; 4]>, - attestation_subnet_count: u64, + spec: &ChainSpec, sync_committee_subnet_count: u64, - blob_sidecar_subnet_count: u64, - data_column_sidecar_subnet_count: u64, ) -> gossipsub::WhitelistSubscriptionFilter { let mut possible_hashes = HashSet::new(); for fork_digest in possible_fork_digests { @@ -259,16 +257,21 @@ pub(crate) fn create_whitelist_filter( add(BlsToExecutionChange); add(LightClientFinalityUpdate); add(LightClientOptimisticUpdate); - for id in 0..attestation_subnet_count { + for id in 0..spec.attestation_subnet_count { add(Attestation(SubnetId::new(id))); } for id in 0..sync_committee_subnet_count { add(SyncCommitteeMessage(SyncSubnetId::new(id))); } - for id in 0..blob_sidecar_subnet_count { + let blob_subnet_count = if spec.electra_fork_epoch.is_some() { + spec.blob_sidecar_subnet_count_electra + } else { + spec.blob_sidecar_subnet_count + }; + for id in 0..blob_subnet_count { add(BlobSidecar(id)); } - for id in 0..data_column_sidecar_subnet_count { + for id in 0..spec.data_column_sidecar_subnet_count { add(DataColumnSidecar(DataColumnSubnetId::new(id))); } } diff --git a/beacon_node/lighthouse_network/src/types/mod.rs b/beacon_node/lighthouse_network/src/types/mod.rs index 6f266fd2ba..a1eedaef74 100644 --- 
a/beacon_node/lighthouse_network/src/types/mod.rs +++ b/beacon_node/lighthouse_network/src/types/mod.rs @@ -18,5 +18,5 @@ pub use sync_state::{BackFillState, SyncState}; pub use topics::{ attestation_sync_committee_topics, core_topics_to_subscribe, fork_core_topics, subnet_from_topic_hash, GossipEncoding, GossipKind, GossipTopic, ALTAIR_CORE_TOPICS, - BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, DENEB_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, + BASE_CORE_TOPICS, CAPELLA_CORE_TOPICS, LIGHT_CLIENT_GOSSIP_TOPICS, }; diff --git a/beacon_node/lighthouse_network/src/types/topics.rs b/beacon_node/lighthouse_network/src/types/topics.rs index 8cdecc6bfa..475b459ccb 100644 --- a/beacon_node/lighthouse_network/src/types/topics.rs +++ b/beacon_node/lighthouse_network/src/types/topics.rs @@ -41,8 +41,6 @@ pub const LIGHT_CLIENT_GOSSIP_TOPICS: [GossipKind; 2] = [ GossipKind::LightClientOptimisticUpdate, ]; -pub const DENEB_CORE_TOPICS: [GossipKind; 0] = []; - /// Returns the core topics associated with each fork that are new to the previous fork pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> Vec { match fork_name { @@ -56,11 +54,16 @@ pub fn fork_core_topics(fork_name: &ForkName, spec: &ChainSpec) -> V for i in 0..spec.blob_sidecar_subnet_count { deneb_blob_topics.push(GossipKind::BlobSidecar(i)); } - let mut deneb_topics = DENEB_CORE_TOPICS.to_vec(); - deneb_topics.append(&mut deneb_blob_topics); - deneb_topics + deneb_blob_topics + } + ForkName::Electra => { + // All of electra blob topics are core topics + let mut electra_blob_topics = Vec::new(); + for i in 0..spec.blob_sidecar_subnet_count_electra { + electra_blob_topics.push(GossipKind::BlobSidecar(i)); + } + electra_blob_topics } - ForkName::Electra => vec![], ForkName::Fulu => vec![], } } @@ -88,7 +91,12 @@ pub fn core_topics_to_subscribe( topics.extend(previous_fork_topics); current_fork = previous_fork; } + // Remove duplicates topics + .into_iter() + .collect::>() + .into_iter() + .collect() } /// A 
gossipsub topic which encapsulates the type of messages that should be sent and received over @@ -467,16 +475,19 @@ mod tests { type E = MainnetEthSpec; let spec = E::default_spec(); let mut all_topics = Vec::new(); + let mut electra_core_topics = fork_core_topics::(&ForkName::Electra, &spec); let mut deneb_core_topics = fork_core_topics::(&ForkName::Deneb, &spec); + all_topics.append(&mut electra_core_topics); all_topics.append(&mut deneb_core_topics); all_topics.extend(CAPELLA_CORE_TOPICS); all_topics.extend(ALTAIR_CORE_TOPICS); all_topics.extend(BASE_CORE_TOPICS); let latest_fork = *ForkName::list_all().last().unwrap(); - assert_eq!( - core_topics_to_subscribe::(latest_fork, &spec), - all_topics - ); + let core_topics = core_topics_to_subscribe::(latest_fork, &spec); + // Need to check all the topics exist in an order independent manner + for topic in all_topics { + assert!(core_topics.contains(&topic)); + } } } diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs index ea4d8641f6..6594f3c44e 100644 --- a/consensus/types/src/chain_spec.rs +++ b/consensus/types/src/chain_spec.rs @@ -191,7 +191,6 @@ pub struct ChainSpec { pub max_pending_partials_per_withdrawals_sweep: u64, pub min_per_epoch_churn_limit_electra: u64, pub max_per_epoch_activation_exit_churn_limit: u64, - pub max_blobs_per_block_electra: u64, /* * Fulu hard fork params @@ -240,6 +239,13 @@ pub struct ChainSpec { pub blob_sidecar_subnet_count: u64, max_blobs_per_block: u64, + /* + * Networking Electra + */ + max_blobs_per_block_electra: u64, + pub blob_sidecar_subnet_count_electra: u64, + pub max_request_blob_sidecars_electra: u64, + /* * Networking Derived * @@ -618,6 +624,14 @@ impl ChainSpec { } } + pub fn max_request_blob_sidecars(&self, fork_name: ForkName) -> usize { + if fork_name.electra_enabled() { + self.max_request_blob_sidecars_electra as usize + } else { + self.max_request_blob_sidecars as usize + } + } + /// Return the value of `MAX_BLOBS_PER_BLOCK` 
appropriate for the fork at `epoch`. pub fn max_blobs_per_block(&self, epoch: Epoch) -> u64 { self.max_blobs_per_block_by_fork(self.fork_name_at_epoch(epoch)) @@ -830,7 +844,6 @@ impl ChainSpec { u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) }) .expect("calculation does not overflow"), - max_blobs_per_block_electra: default_max_blobs_per_block_electra(), /* * Fulu hard fork params @@ -886,6 +899,13 @@ impl ChainSpec { max_blobs_by_root_request: default_max_blobs_by_root_request(), max_data_columns_by_root_request: default_data_columns_by_root_request(), + /* + * Networking Electra specific + */ + max_blobs_per_block_electra: default_max_blobs_per_block_electra(), + blob_sidecar_subnet_count_electra: default_blob_sidecar_subnet_count_electra(), + max_request_blob_sidecars_electra: default_max_request_blob_sidecars_electra(), + /* * Application specific */ @@ -1161,7 +1181,6 @@ impl ChainSpec { u64::checked_pow(2, 8)?.checked_mul(u64::checked_pow(10, 9)?) }) .expect("calculation does not overflow"), - max_blobs_per_block_electra: default_max_blobs_per_block_electra(), /* * Fulu hard fork params @@ -1216,6 +1235,13 @@ impl ChainSpec { max_blobs_by_root_request: default_max_blobs_by_root_request(), max_data_columns_by_root_request: default_data_columns_by_root_request(), + /* + * Networking Electra specific + */ + max_blobs_per_block_electra: default_max_blobs_per_block_electra(), + blob_sidecar_subnet_count_electra: default_blob_sidecar_subnet_count_electra(), + max_request_blob_sidecars_electra: default_max_request_blob_sidecars_electra(), + /* * Application specific */ @@ -1421,6 +1447,12 @@ pub struct Config { #[serde(default = "default_max_blobs_per_block_electra")] #[serde(with = "serde_utils::quoted_u64")] max_blobs_per_block_electra: u64, + #[serde(default = "default_blob_sidecar_subnet_count_electra")] + #[serde(with = "serde_utils::quoted_u64")] + pub blob_sidecar_subnet_count_electra: u64, + #[serde(default = 
"default_max_request_blob_sidecars_electra")] + #[serde(with = "serde_utils::quoted_u64")] + max_request_blob_sidecars_electra: u64, #[serde(default = "default_custody_requirement")] #[serde(with = "serde_utils::quoted_u64")] @@ -1555,6 +1587,14 @@ const fn default_max_blobs_per_block() -> u64 { 6 } +const fn default_blob_sidecar_subnet_count_electra() -> u64 { + 9 +} + +const fn default_max_request_blob_sidecars_electra() -> u64 { + 1152 +} + const fn default_min_per_epoch_churn_limit_electra() -> u64 { 128_000_000_000 } @@ -1787,6 +1827,8 @@ impl Config { max_per_epoch_activation_exit_churn_limit: spec .max_per_epoch_activation_exit_churn_limit, max_blobs_per_block_electra: spec.max_blobs_per_block_electra, + blob_sidecar_subnet_count_electra: spec.blob_sidecar_subnet_count_electra, + max_request_blob_sidecars_electra: spec.max_request_blob_sidecars_electra, custody_requirement: spec.custody_requirement, data_column_sidecar_subnet_count: spec.data_column_sidecar_subnet_count, @@ -1865,6 +1907,8 @@ impl Config { min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, max_blobs_per_block_electra, + blob_sidecar_subnet_count_electra, + max_request_blob_sidecars_electra, custody_requirement, data_column_sidecar_subnet_count, number_of_columns, @@ -1935,6 +1979,8 @@ impl Config { min_per_epoch_churn_limit_electra, max_per_epoch_activation_exit_churn_limit, max_blobs_per_block_electra, + max_request_blob_sidecars_electra, + blob_sidecar_subnet_count_electra, // We need to re-derive any values that might have changed in the config. max_blocks_by_root_request: max_blocks_by_root_request_common(max_request_blocks),