From f51a292f77575a1786af34271fb44954f141c377 Mon Sep 17 00:00:00 2001 From: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Fri, 3 Jan 2025 20:27:21 +0100 Subject: [PATCH 1/8] fully lint only explicitly to avoid unnecessary rebuilds (#6753) * fully lint only explicitly to avoid unnecessary rebuilds --- .github/workflows/test-suite.yml | 2 +- Makefile | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 65663e0cf4..45f3b757e7 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -350,7 +350,7 @@ jobs: - name: Check formatting with cargo fmt run: make cargo-fmt - name: Lint code for quality and style with Clippy - run: make lint + run: make lint-full - name: Certify Cargo.lock freshness run: git diff --exit-code Cargo.lock - name: Typecheck benchmark code without running it diff --git a/Makefile b/Makefile index 958abf8705..8faf8a2e54 100644 --- a/Makefile +++ b/Makefile @@ -204,7 +204,7 @@ test-full: cargo-fmt test-release test-debug test-ef test-exec-engine # Lints the code for bad style and potentially unsafe arithmetic using Clippy. # Clippy lints are opt-in per-crate for now. By default, everything is allowed except for performance and correctness lints. lint: - RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ + cargo clippy --workspace --benches --tests $(EXTRA_CLIPPY_OPTS) --features "$(TEST_FEATURES)" -- \ -D clippy::fn_to_numeric_cast_any \ -D clippy::manual_let_else \ -D clippy::large_stack_frames \ @@ -220,6 +220,10 @@ lint: lint-fix: EXTRA_CLIPPY_OPTS="--fix --allow-staged --allow-dirty" $(MAKE) lint +# Also run the lints on the optimized-only tests +lint-full: + RUSTFLAGS="-C debug-assertions=no $(RUSTFLAGS)" $(MAKE) lint + # Runs the makefile in the `ef_tests` repo. 
# # May download and extract an archive of test vectors from the ethereum From 84519010f29b7f8ac50cb2e68ec6ffed69a6e6f2 Mon Sep 17 00:00:00 2001 From: realbigsean Date: Tue, 7 Jan 2025 16:39:48 -0800 Subject: [PATCH 2/8] add joao CODEOWNERS (#6762) * add joao CODEOWNERS --- .github/CODEOWNERS | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 0000000000..f9478d1369 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,2 @@ +beacon_node/network/ @jxs +beacon_node/lighthouse_network/ @jxs From 57141d8b4bab2d8281a01629de04d9a935f00d1c Mon Sep 17 00:00:00 2001 From: Ekaterina Riazantseva Date: Wed, 8 Jan 2025 01:39:53 +0100 Subject: [PATCH 3/8] Add 'beacon_' prefix to PeerDAS metrics names (#6537) * Add 'beacon_' prefix to PeerDAS metrics names * Merge remote-tracking branch 'origin/unstable' into peerdas-metrics * Merge 'origin/unstable' into peerdas-metrics * Merge remote-tracking branch 'origin/unstable/ into peerdas-metrics * Add 'beacon_' prefix to 'kzg_data_column' metrics --- beacon_node/beacon_chain/src/metrics.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index c6aa9fbcac..8d71e895c9 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1656,7 +1656,7 @@ pub static BLOB_SIDECAR_INCLUSION_PROOF_COMPUTATION: LazyLock> }); pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock> = LazyLock::new(|| { try_create_histogram_vec_with_buckets( - "data_column_sidecar_computation_seconds", + "beacon_data_column_sidecar_computation_seconds", "Time taken to compute data column sidecar, including cells, proofs and inclusion proof", Ok(vec![0.1, 0.15, 0.25, 0.35, 0.5, 0.7, 1.0, 2.5, 5.0, 10.0]), &["blob_count"], @@ -1665,7 +1665,7 @@ pub static DATA_COLUMN_SIDECAR_COMPUTATION: LazyLock> = Laz pub static DATA_COLUMN_SIDECAR_INCLUSION_PROOF_VERIFICATION: LazyLock> = LazyLock::new(|| { try_create_histogram( - "data_column_sidecar_inclusion_proof_verification_seconds", + "beacon_data_column_sidecar_inclusion_proof_verification_seconds", "Time taken to verify data_column sidecar inclusion proof", ) }); @@ -1847,7 +1847,7 @@ pub static KZG_VERIFICATION_BATCH_TIMES: LazyLock> = LazyLock: pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock> = LazyLock::new(|| { try_create_histogram_with_buckets( - "kzg_verification_data_column_single_seconds", + "beacon_kzg_verification_data_column_single_seconds", "Runtime of single data column kzg verification", Ok(vec![ 0.0005, 0.001, 0.0015, 0.002, 0.003, 0.004, 0.005, 0.007, 0.01, 0.02, 0.05, @@ -1857,7 +1857,7 @@ pub static KZG_VERIFICATION_DATA_COLUMN_SINGLE_TIMES: LazyLock pub static KZG_VERIFICATION_DATA_COLUMN_BATCH_TIMES: LazyLock> = LazyLock::new(|| { try_create_histogram_with_buckets( - "kzg_verification_data_column_batch_seconds", + "beacon_kzg_verification_data_column_batch_seconds", "Runtime of batched data column kzg verification", Ok(vec![ 0.002, 0.004, 0.006, 0.008, 0.01, 0.012, 0.015, 0.02, 0.03, 0.05, 0.07, @@ -1910,14 +1910,14 @@ pub static DATA_AVAILABILITY_OVERFLOW_STORE_CACHE_SIZE: LazyLock> = LazyLock::new(|| { try_create_histogram( - "data_availability_reconstruction_time_seconds", + "beacon_data_availability_reconstruction_time_seconds", "Time taken to reconstruct columns", ) }); pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock> = LazyLock::new(|| { 
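// (Like the metrics above, this counter is renamed with the `beacon_` prefix so
// that all beacon-node metrics live under one Prometheus namespace; dashboards
// or alerts that query the old unprefixed names will need to be updated.)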
try_create_int_counter( - "data_availability_reconstructed_columns_total", + "beacon_data_availability_reconstructed_columns_total", "Total count of reconstructed columns", ) }); @@ -1925,7 +1925,7 @@ pub static DATA_AVAILABILITY_RECONSTRUCTED_COLUMNS: LazyLock> pub static KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "kzg_data_column_reconstruction_attempts", + "beacon_kzg_data_column_reconstruction_attempts", "Count of times data column reconstruction has been attempted", ) }); @@ -1933,7 +1933,7 @@ pub static KZG_DATA_COLUMN_RECONSTRUCTION_ATTEMPTS: LazyLock> pub static KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "kzg_data_column_reconstruction_failures", + "beacon_kzg_data_column_reconstruction_failures", "Count of times data column reconstruction has failed", ) }); @@ -1941,7 +1941,7 @@ pub static KZG_DATA_COLUMN_RECONSTRUCTION_FAILURES: LazyLock> pub static KZG_DATA_COLUMN_RECONSTRUCTION_INCOMPLETE_TOTAL: LazyLock> = LazyLock::new(|| { try_create_int_counter_vec( - "kzg_data_column_reconstruction_incomplete_total", + "beacon_kzg_data_column_reconstruction_incomplete_total", "Count of times data column reconstruction attempts did not result in an import", &["reason"], ) From 7ec748a108bdef9fbe02ae9edb2f49f2682a555f Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 8 Jan 2025 14:12:34 +1100 Subject: [PATCH 4/8] Implement `getBlobSidecars` support for PeerDAS (#6755) * Implement getBlobSidecars endpoint for PeerDAS. * Merge branch 'unstable' into peerdas-get-blob-sidecars * Fix incorrect logging. * Replace `and_then` usage. --- beacon_node/beacon_chain/src/kzg_utils.rs | 143 ++++++++++++++++++++-- beacon_node/http_api/src/block_id.rs | 84 ++++++++++--- consensus/types/src/blob_sidecar.rs | 6 +- 3 files changed, 202 insertions(+), 31 deletions(-) diff --git a/beacon_node/beacon_chain/src/kzg_utils.rs b/beacon_node/beacon_chain/src/kzg_utils.rs index 1680c0298d..bd47e82215 100644 --- a/beacon_node/beacon_chain/src/kzg_utils.rs +++ b/beacon_node/beacon_chain/src/kzg_utils.rs @@ -7,8 +7,9 @@ use std::sync::Arc; use types::beacon_block_body::KzgCommitments; use types::data_column_sidecar::{Cell, DataColumn, DataColumnSidecarError}; use types::{ - Blob, ChainSpec, ColumnIndex, DataColumnSidecar, DataColumnSidecarList, EthSpec, Hash256, - KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, SignedBeaconBlockHeader, + Blob, BlobSidecar, BlobSidecarList, ChainSpec, ColumnIndex, DataColumnSidecar, + DataColumnSidecarList, EthSpec, Hash256, KzgCommitment, KzgProof, KzgProofs, SignedBeaconBlock, + SignedBeaconBlockHeader, SignedBlindedBeaconBlock, }; /// Converts a blob ssz List object to an array to be used with the kzg @@ -243,6 +244,83 @@ fn build_data_column_sidecars( Ok(sidecars) } +/// Reconstruct blobs from a subset of data column sidecars (requires at least 50%). +/// +/// If `blob_indices_opt` is `None`, this function attempts to reconstruct all blobs associated +/// with the block. +pub fn reconstruct_blobs( + kzg: &Kzg, + data_columns: &[Arc>], + blob_indices_opt: Option>, + signed_block: &SignedBlindedBeaconBlock, +) -> Result, String> { + // The data columns are from the database, so we assume their correctness. 
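+    // Each blob corresponds to one row across the column sidecars: with the spec's
+    // NUMBER_OF_COLUMNS = 128, a row holds 128 cells, of which the first 64 encode
+    // the original blob and the remainder its Reed-Solomon extension. The KZG
+    // library can recover a full row from any 64 of its cells, which is why at
+    // least 50% of the columns must be supplied.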
+ let first_data_column = data_columns + .first() + .ok_or("data_columns should have at least one element".to_string())?; + + let blob_indices: Vec = match blob_indices_opt { + Some(indices) => indices.into_iter().map(|i| i as usize).collect(), + None => { + let num_of_blobs = first_data_column.kzg_commitments.len(); + (0..num_of_blobs).collect() + } + }; + + let blob_sidecars = blob_indices + .into_par_iter() + .map(|row_index| { + let mut cells: Vec = vec![]; + let mut cell_ids: Vec = vec![]; + for data_column in data_columns { + let cell = data_column + .column + .get(row_index) + .ok_or(format!("Missing data column at row index {row_index}")) + .and_then(|cell| { + ssz_cell_to_crypto_cell::(cell).map_err(|e| format!("{e:?}")) + })?; + + cells.push(cell); + cell_ids.push(data_column.index); + } + + let (cells, _kzg_proofs) = kzg + .recover_cells_and_compute_kzg_proofs(&cell_ids, &cells) + .map_err(|e| format!("Failed to recover cells and compute KZG proofs: {e:?}"))?; + + let num_cells_original_blob = cells.len() / 2; + let blob_bytes = cells + .into_iter() + .take(num_cells_original_blob) + .flat_map(|cell| cell.into_iter()) + .collect(); + + let blob = Blob::::new(blob_bytes).map_err(|e| format!("{e:?}"))?; + let kzg_commitment = first_data_column + .kzg_commitments + .get(row_index) + .ok_or(format!("Missing KZG commitment for blob {row_index}"))?; + let kzg_proof = compute_blob_kzg_proof::(kzg, &blob, *kzg_commitment) + .map_err(|e| format!("{e:?}"))?; + + BlobSidecar::::new_with_existing_proof( + row_index, + blob, + signed_block, + first_data_column.signed_block_header.clone(), + &first_data_column.kzg_commitments_inclusion_proof, + kzg_proof, + ) + .map(Arc::new) + .map_err(|e| format!("{e:?}")) + }) + .collect::, _>>()? + .into(); + + Ok(blob_sidecars) +} + /// Reconstruct all data columns from a subset of data column sidecars (requires at least 50%). 
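// A rough sketch of how `reconstruct_blobs` is driven from the HTTP layer (see
// `get_blobs_from_data_columns` in `block_id.rs` below); `kzg`, `columns` and
// `blinded_block` are stand-in names, not taken from this diff:
//
//     assert!(columns.len() >= spec.number_of_columns / 2);
//     // `None` asks for every blob committed to by the block.
//     let blobs = reconstruct_blobs(&kzg, &columns, None, &blinded_block)?;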
pub fn reconstruct_data_columns( kzg: &Kzg, @@ -265,7 +343,7 @@ pub fn reconstruct_data_columns( for data_column in data_columns { let cell = data_column.column.get(row_index).ok_or( KzgError::InconsistentArrayLength(format!( - "Missing data column at index {row_index}" + "Missing data column at row index {row_index}" )), )?; @@ -289,12 +367,16 @@ pub fn reconstruct_data_columns( #[cfg(test)] mod test { - use crate::kzg_utils::{blobs_to_data_column_sidecars, reconstruct_data_columns}; + use crate::kzg_utils::{ + blobs_to_data_column_sidecars, reconstruct_blobs, reconstruct_data_columns, + }; use bls::Signature; + use eth2::types::BlobsBundle; + use execution_layer::test_utils::generate_blobs; use kzg::{trusted_setup::get_trusted_setup, Kzg, KzgCommitment, TrustedSetup}; use types::{ - beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, Blob, BlobsList, - ChainSpec, EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, + beacon_block_body::KzgCommitments, BeaconBlock, BeaconBlockDeneb, BlobsList, ChainSpec, + EmptyBlock, EthSpec, MainnetEthSpec, SignedBeaconBlock, }; type E = MainnetEthSpec; @@ -308,6 +390,7 @@ mod test { test_build_data_columns_empty(&kzg, &spec); test_build_data_columns(&kzg, &spec); test_reconstruct_data_columns(&kzg, &spec); + test_reconstruct_blobs_from_data_columns(&kzg, &spec); } #[track_caller] @@ -379,6 +462,36 @@ mod test { } } + #[track_caller] + fn test_reconstruct_blobs_from_data_columns(kzg: &Kzg, spec: &ChainSpec) { + let num_of_blobs = 6; + let (signed_block, blobs) = create_test_block_and_blobs::(num_of_blobs, spec); + let blob_refs = blobs.iter().collect::>(); + let column_sidecars = + blobs_to_data_column_sidecars(&blob_refs, &signed_block, kzg, spec).unwrap(); + + // Now reconstruct + let signed_blinded_block = signed_block.into(); + let blob_indices = vec![3, 4, 5]; + let reconstructed_blobs = reconstruct_blobs( + kzg, + &column_sidecars.iter().as_slice()[0..column_sidecars.len() / 2], + Some(blob_indices.clone()), + &signed_blinded_block, + ) + .unwrap(); + + for i in blob_indices { + let reconstructed_blob = &reconstructed_blobs + .iter() + .find(|sidecar| sidecar.index == i) + .map(|sidecar| sidecar.blob.clone()) + .expect("reconstructed blob should exist"); + let original_blob = blobs.get(i as usize).unwrap(); + assert_eq!(reconstructed_blob, original_blob, "{i}"); + } + } + fn get_kzg() -> Kzg { let trusted_setup: TrustedSetup = serde_json::from_reader(get_trusted_setup().as_slice()) .map_err(|e| format!("Unable to read trusted setup file: {}", e)) @@ -397,12 +510,20 @@ mod test { KzgCommitments::::new(vec![KzgCommitment::empty_for_testing(); num_of_blobs]) .unwrap(); - let signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); + let mut signed_block = SignedBeaconBlock::from_block(block, Signature::empty()); + + let (blobs_bundle, _) = generate_blobs::(num_of_blobs).unwrap(); + let BlobsBundle { + blobs, + commitments, + proofs: _, + } = blobs_bundle; - let blobs = (0..num_of_blobs) - .map(|_| Blob::::default()) - .collect::>() - .into(); + *signed_block + .message_mut() + .body_mut() + .blob_kzg_commitments_mut() + .unwrap() = commitments; (signed_block, blobs) } diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index dba8eb1ef3..b9e4883318 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -1,4 +1,5 @@ use crate::{state_id::checkpoint_slot_and_execution_optimistic, ExecutionOptimistic}; +use 
beacon_chain::kzg_utils::reconstruct_blobs; use beacon_chain::{BeaconChain, BeaconChainError, BeaconChainTypes, WhenSlotSkipped}; use eth2::types::BlobIndicesQuery; use eth2::types::BlockId as CoreBlockId; @@ -9,6 +10,7 @@ use types::{ BlobSidecarList, EthSpec, FixedBytesExtended, Hash256, SignedBeaconBlock, SignedBlindedBeaconBlock, Slot, }; +use warp::Rejection; /// Wraps `eth2::types::BlockId` and provides a simple way to obtain a block or root for a given /// `BlockId`. @@ -261,7 +263,7 @@ impl BlockId { #[allow(clippy::type_complexity)] pub fn get_blinded_block_and_blob_list_filtered( &self, - indices: BlobIndicesQuery, + query: BlobIndicesQuery, chain: &BeaconChain, ) -> Result< ( @@ -286,20 +288,32 @@ impl BlockId { // Return the `BlobSidecarList` identified by `self`. let blob_sidecar_list = if !blob_kzg_commitments.is_empty() { - chain - .store - .get_blobs(&root) - .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? - .ok_or_else(|| { - warp_utils::reject::custom_not_found(format!( - "no blobs stored for block {root}" - )) - })? + if chain.spec.is_peer_das_enabled_for_epoch(block.epoch()) { + Self::get_blobs_from_data_columns(chain, root, query.indices, &block)? + } else { + Self::get_blobs(chain, root, query.indices)? + } } else { BlobSidecarList::default() }; - let blob_sidecar_list_filtered = match indices.indices { + Ok((block, blob_sidecar_list, execution_optimistic, finalized)) + } + + fn get_blobs( + chain: &BeaconChain, + root: Hash256, + indices: Option>, + ) -> Result, Rejection> { + let blob_sidecar_list = chain + .store + .get_blobs(&root) + .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? + .ok_or_else(|| { + warp_utils::reject::custom_not_found(format!("no blobs stored for block {root}")) + })?; + + let blob_sidecar_list_filtered = match indices { Some(vec) => { let list = blob_sidecar_list .into_iter() @@ -310,12 +324,48 @@ impl BlockId { } None => blob_sidecar_list, }; - Ok(( - block, - blob_sidecar_list_filtered, - execution_optimistic, - finalized, - )) + + Ok(blob_sidecar_list_filtered) + } + + fn get_blobs_from_data_columns( + chain: &BeaconChain, + root: Hash256, + blob_indices: Option>, + block: &SignedBlindedBeaconBlock<::EthSpec>, + ) -> Result, Rejection> { + let column_indices = chain.store.get_data_column_keys(root).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error fetching data column keys: {e:?}" + )) + })?; + + let num_found_column_keys = column_indices.len(); + let num_required_columns = chain.spec.number_of_columns / 2; + let is_blob_available = num_found_column_keys >= num_required_columns; + + if is_blob_available { + let data_columns = column_indices + .into_iter() + .filter_map( + |column_index| match chain.get_data_column(&root, &column_index) { + Ok(Some(data_column)) => Some(Ok(data_column)), + Ok(None) => None, + Err(e) => Some(Err(warp_utils::reject::beacon_chain_error(e))), + }, + ) + .collect::, _>>()?; + + reconstruct_blobs(&chain.kzg, &data_columns, blob_indices, block).map_err(|e| { + warp_utils::reject::custom_server_error(format!( + "Error reconstructing blobs from data columns: {e:?}" + )) + }) + } else { + Err(warp_utils::reject::custom_server_error( + format!("Insufficient data columns to reconstruct blobs: required {num_required_columns}, but only {num_found_column_keys} were found.") + )) + } } } diff --git a/consensus/types/src/blob_sidecar.rs b/consensus/types/src/blob_sidecar.rs index 5a330388cc..302aa2a4c1 100644 --- a/consensus/types/src/blob_sidecar.rs +++ 
b/consensus/types/src/blob_sidecar.rs @@ -1,9 +1,9 @@ use crate::test_utils::TestRandom; -use crate::ForkName; use crate::{ beacon_block_body::BLOB_KZG_COMMITMENTS_INDEX, BeaconBlockHeader, BeaconStateError, Blob, Epoch, EthSpec, FixedVector, Hash256, SignedBeaconBlockHeader, Slot, VariableList, }; +use crate::{AbstractExecPayload, ForkName}; use crate::{ForkVersionDeserialize, KzgProofs, SignedBeaconBlock}; use bls::Signature; use derivative::Derivative; @@ -150,10 +150,10 @@ impl BlobSidecar { }) } - pub fn new_with_existing_proof( + pub fn new_with_existing_proof>( index: usize, blob: Blob, - signed_block: &SignedBeaconBlock, + signed_block: &SignedBeaconBlock, signed_block_header: SignedBeaconBlockHeader, kzg_commitments_inclusion_proof: &[Hash256], kzg_proof: KzgProof, From 80cfbea7fe4c78d90638b256b0cb7fc19652b31f Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 8 Jan 2025 14:12:37 +1100 Subject: [PATCH 5/8] Fix incorrect data column metric name (#6761) * Fix incorrect data column metric name. --- beacon_node/beacon_chain/src/metrics.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon_node/beacon_chain/src/metrics.rs b/beacon_node/beacon_chain/src/metrics.rs index 8d71e895c9..ae3add7f03 100644 --- a/beacon_node/beacon_chain/src/metrics.rs +++ b/beacon_node/beacon_chain/src/metrics.rs @@ -1693,7 +1693,7 @@ pub static DATA_COLUMN_SIDECAR_GOSSIP_VERIFICATION_TIMES: LazyLock> = LazyLock::new(|| { try_create_int_counter( - "beacon_blobs_column_sidecar_processing_successes_total", + "beacon_data_column_sidecar_processing_successes_total", "Number of data column sidecars verified for gossip", ) }); From 87b72dec21759acfbc749220be3aee11ac91cdf3 Mon Sep 17 00:00:00 2001 From: Jimmy Chen Date: Wed, 8 Jan 2025 14:12:39 +1100 Subject: [PATCH 6/8] Fix incorrect VC default HTTP token path when the `--datadir` flag is present (#6748) * Fix incorrect default http token path when datadir flag is present. --- lighthouse/tests/validator_client.rs | 15 ++++++++++++++- validator_client/http_api/src/lib.rs | 1 + validator_client/src/config.rs | 9 +++++---- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/lighthouse/tests/validator_client.rs b/lighthouse/tests/validator_client.rs index c5b303e4d1..1945399c86 100644 --- a/lighthouse/tests/validator_client.rs +++ b/lighthouse/tests/validator_client.rs @@ -345,7 +345,7 @@ fn http_store_keystore_passwords_in_secrets_dir_present() { } #[test] -fn http_token_path_flag() { +fn http_token_path_flag_present() { let dir = TempDir::new().expect("Unable to create temporary directory"); CommandLineTest::new() .flag("http", None) @@ -359,6 +359,19 @@ fn http_token_path_flag() { }); } +#[test] +fn http_token_path_default() { + CommandLineTest::new() + .flag("http", None) + .run() + .with_config(|config| { + assert_eq!( + config.http_api.http_token_path, + config.validator_dir.join("api-token.txt") + ); + }); +} + // Tests for Metrics flags. #[test] fn metrics_flag() { diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index f3dab3780c..73ebe717af 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -106,6 +106,7 @@ pub struct Config { impl Default for Config { fn default() -> Self { + // This value is always overridden when building config from CLI. 
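+        // Outside the CLI (e.g. in tests) the fallback below applies: the token lives
+        // under `$HOME/.lighthouse/...`, or under the current working directory if no
+        // home directory can be determined.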
let http_token_path = dirs::home_dir() .unwrap_or_else(|| PathBuf::from(".")) .join(DEFAULT_ROOT_DIR) diff --git a/validator_client/src/config.rs b/validator_client/src/config.rs index 0fecb5202d..bb72ef81c8 100644 --- a/validator_client/src/config.rs +++ b/validator_client/src/config.rs @@ -314,10 +314,11 @@ impl Config { config.http_api.store_passwords_in_secrets_dir = true; } - if cli_args.get_one::("http-token-path").is_some() { - config.http_api.http_token_path = parse_required(cli_args, "http-token-path") - // For backward compatibility, default to the path under the validator dir if not provided. - .unwrap_or_else(|_| config.validator_dir.join(PK_FILENAME)); + if let Some(http_token_path) = cli_args.get_one::("http-token-path") { + config.http_api.http_token_path = PathBuf::from(http_token_path); + } else { + // For backward compatibility, default to the path under the validator dir if not provided. + config.http_api.http_token_path = config.validator_dir.join(PK_FILENAME); } /* From 1f6850fae2807c1d3f0e281524e0b1b9ab230e67 Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Fri, 10 Jan 2025 06:43:29 +0530 Subject: [PATCH 7/8] Rust 1.84 lints (#6781) * Fix few lints * Fix remaining lints * Use fully qualified syntax --- beacon_node/beacon_chain/src/beacon_chain.rs | 10 +++++----- beacon_node/beacon_chain/src/canonical_head.rs | 6 +----- .../beacon_chain/src/data_availability_checker.rs | 6 +++--- .../overflow_lru_cache.rs | 9 +++------ .../beacon_chain/src/early_attester_cache.rs | 2 +- beacon_node/beacon_chain/src/eth1_chain.rs | 2 +- beacon_node/beacon_chain/src/execution_payload.rs | 8 ++++---- .../beacon_chain/src/graffiti_calculator.rs | 7 ++----- .../beacon_chain/src/observed_aggregates.rs | 2 +- beacon_node/beacon_chain/src/observed_attesters.rs | 4 ++-- .../beacon_chain/src/observed_data_sidecars.rs | 2 +- beacon_node/beacon_chain/src/shuffling_cache.rs | 2 +- beacon_node/beacon_processor/src/lib.rs | 2 +- beacon_node/client/src/builder.rs | 2 +- beacon_node/client/src/notifier.rs | 8 +++----- beacon_node/execution_layer/src/engine_api/http.rs | 10 ++++------ beacon_node/execution_layer/src/lib.rs | 2 +- beacon_node/execution_layer/src/payload_status.rs | 2 +- .../src/test_utils/mock_execution_layer.rs | 2 +- beacon_node/genesis/src/eth1_genesis_service.rs | 2 +- beacon_node/http_api/src/lib.rs | 4 ++-- beacon_node/http_api/src/validator.rs | 2 +- beacon_node/http_api/tests/interactive_tests.rs | 2 +- beacon_node/http_api/tests/tests.rs | 2 +- .../lighthouse_network/gossipsub/src/backoff.rs | 2 +- .../lighthouse_network/gossipsub/src/behaviour.rs | 3 +-- beacon_node/lighthouse_network/src/config.rs | 6 +++--- .../src/discovery/subnet_predicate.rs | 4 ++-- .../lighthouse_network/src/peer_manager/peerdb.rs | 2 +- .../src/peer_manager/peerdb/peer_info.rs | 4 ++-- .../src/network_beacon_processor/gossip_methods.rs | 2 +- beacon_node/operation_pool/src/lib.rs | 14 +++++--------- beacon_node/store/src/forwards_iter.rs | 4 ++-- beacon_node/store/src/reconstruct.rs | 2 +- beacon_node/store/src/state_cache.rs | 8 ++------ common/account_utils/src/validator_definitions.rs | 2 +- common/logging/src/lib.rs | 2 +- consensus/proto_array/src/proto_array.rs | 10 +++++----- consensus/state_processing/src/genesis.rs | 12 ++++++------ .../per_block_processing/altair/sync_committee.rs | 2 +- .../epoch_processing_summary.rs | 12 ++++++------ consensus/types/src/beacon_block_body.rs | 2 +- consensus/types/src/beacon_state.rs | 2 +- .../src/beacon_state/progressive_balances_cache.rs | 2 +- 
consensus/types/src/chain_spec.rs | 10 ++++------ consensus/types/src/deposit_tree_snapshot.rs | 3 +-- consensus/types/src/graffiti.rs | 2 +- lcli/src/transition_blocks.rs | 4 ++-- slasher/src/database.rs | 2 +- testing/ef_tests/src/cases/fork_choice.rs | 2 +- testing/ef_tests/src/cases/operations.rs | 4 ++-- testing/ef_tests/src/decode.rs | 2 +- validator_client/doppelganger_service/src/lib.rs | 2 +- .../slashing_protection/src/slashing_database.rs | 4 +--- .../validator_services/src/preparation_service.rs | 2 +- validator_client/validator_services/src/sync.rs | 2 +- validator_manager/src/create_validators.rs | 4 ++-- validator_manager/src/delete_validators.rs | 2 +- validator_manager/src/import_validators.rs | 2 +- validator_manager/src/list_validators.rs | 2 +- validator_manager/src/move_validators.rs | 2 +- 61 files changed, 110 insertions(+), 138 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index 80766d57b3..7bbb9ff74d 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -573,7 +573,7 @@ impl BeaconChain { .start_slot(T::EthSpec::slots_per_epoch()); let is_canonical = self .block_root_at_slot(block_slot, WhenSlotSkipped::None)? - .map_or(false, |canonical_root| block_root == &canonical_root); + .is_some_and(|canonical_root| block_root == &canonical_root); Ok(block_slot <= finalized_slot && is_canonical) } @@ -604,7 +604,7 @@ impl BeaconChain { let slot_is_finalized = state_slot <= finalized_slot; let canonical = self .state_root_at_slot(state_slot)? - .map_or(false, |canonical_root| state_root == &canonical_root); + .is_some_and(|canonical_root| state_root == &canonical_root); Ok(FinalizationAndCanonicity { slot_is_finalized, canonical, @@ -5118,9 +5118,9 @@ impl BeaconChain { .start_of(slot) .unwrap_or_else(|| Duration::from_secs(0)), ); - block_delays.observed.map_or(false, |delay| { - delay >= self.slot_clock.unagg_attestation_production_delay() - }) + block_delays + .observed + .is_some_and(|delay| delay >= self.slot_clock.unagg_attestation_production_delay()) } /// Produce a block for some `slot` upon the given `state`. diff --git a/beacon_node/beacon_chain/src/canonical_head.rs b/beacon_node/beacon_chain/src/canonical_head.rs index 4f92f5ec8f..4e21372efb 100644 --- a/beacon_node/beacon_chain/src/canonical_head.rs +++ b/beacon_node/beacon_chain/src/canonical_head.rs @@ -1254,11 +1254,7 @@ pub fn find_reorg_slot( ($state: ident, $block_root: ident) => { std::iter::once(Ok(($state.slot(), $block_root))) .chain($state.rev_iter_block_roots(spec)) - .skip_while(|result| { - result - .as_ref() - .map_or(false, |(slot, _)| *slot > lowest_slot) - }) + .skip_while(|result| result.as_ref().is_ok_and(|(slot, _)| *slot > lowest_slot)) }; } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index 72806a74d2..f6002ea0ac 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -519,13 +519,13 @@ impl DataAvailabilityChecker { /// Returns true if the given epoch lies within the da boundary and false otherwise. pub fn da_check_required_for_epoch(&self, block_epoch: Epoch) -> bool { self.data_availability_boundary() - .map_or(false, |da_epoch| block_epoch >= da_epoch) + .is_some_and(|da_epoch| block_epoch >= da_epoch) } /// Returns `true` if the current epoch is greater than or equal to the `Deneb` epoch. 
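    // The `map_or(false, ..)` rewrites throughout this patch all have one shape,
    // flagged by clippy on Rust 1.84 (presumably `clippy::unnecessary_map_or`):
    //
    //     opt.map_or(false, |x| x > 5); // before
    //     opt.is_some_and(|x| x > 5);   // after
    //
    // with `Result::is_ok_and` as the analogue for `Result` receivers.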
pub fn is_deneb(&self) -> bool { - self.slot_clock.now().map_or(false, |slot| { - self.spec.deneb_fork_epoch.map_or(false, |deneb_epoch| { + self.slot_clock.now().is_some_and(|slot| { + self.spec.deneb_fork_epoch.is_some_and(|deneb_epoch| { let now_epoch = slot.epoch(T::EthSpec::slots_per_epoch()); now_epoch >= deneb_epoch }) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 40361574af..5ce023038d 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -228,13 +228,10 @@ impl PendingComponents { ); let all_blobs_received = block_kzg_commitments_count_opt - .map_or(false, |num_expected_blobs| { - num_expected_blobs == num_received_blobs - }); + .is_some_and(|num_expected_blobs| num_expected_blobs == num_received_blobs); - let all_columns_received = expected_columns_opt.map_or(false, |num_expected_columns| { - num_expected_columns == num_received_columns - }); + let all_columns_received = expected_columns_opt + .is_some_and(|num_expected_columns| num_expected_columns == num_received_columns); all_blobs_received || all_columns_received } diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index 606610a748..c94ea0e941 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -145,7 +145,7 @@ impl EarlyAttesterCache { self.item .read() .as_ref() - .map_or(false, |item| item.beacon_block_root == block_root) + .is_some_and(|item| item.beacon_block_root == block_root) } /// Returns the block, if `block_root` matches the cached item. diff --git a/beacon_node/beacon_chain/src/eth1_chain.rs b/beacon_node/beacon_chain/src/eth1_chain.rs index cb6e4c34f3..ad4f106517 100644 --- a/beacon_node/beacon_chain/src/eth1_chain.rs +++ b/beacon_node/beacon_chain/src/eth1_chain.rs @@ -153,7 +153,7 @@ fn get_sync_status( // Lighthouse is "cached and ready" when it has cached enough blocks to cover the start of the // current voting period. 
let lighthouse_is_cached_and_ready = - latest_cached_block_timestamp.map_or(false, |t| t >= voting_target_timestamp); + latest_cached_block_timestamp.is_some_and(|t| t >= voting_target_timestamp); Some(Eth1SyncStatusData { head_block_number, diff --git a/beacon_node/beacon_chain/src/execution_payload.rs b/beacon_node/beacon_chain/src/execution_payload.rs index 92d24c53c0..502a7918a1 100644 --- a/beacon_node/beacon_chain/src/execution_payload.rs +++ b/beacon_node/beacon_chain/src/execution_payload.rs @@ -127,9 +127,9 @@ impl PayloadNotifier { /// contains a few extra checks by running `partially_verify_execution_payload` first: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.9/specs/bellatrix/beacon-chain.md#notify_new_payload -async fn notify_new_payload<'a, T: BeaconChainTypes>( +async fn notify_new_payload( chain: &Arc>, - block: BeaconBlockRef<'a, T::EthSpec>, + block: BeaconBlockRef<'_, T::EthSpec>, ) -> Result { let execution_layer = chain .execution_layer @@ -230,9 +230,9 @@ async fn notify_new_payload<'a, T: BeaconChainTypes>( /// Equivalent to the `validate_merge_block` function in the merge Fork Choice Changes: /// /// https://github.com/ethereum/consensus-specs/blob/v1.1.5/specs/merge/fork-choice.md#validate_merge_block -pub async fn validate_merge_block<'a, T: BeaconChainTypes>( +pub async fn validate_merge_block( chain: &Arc>, - block: BeaconBlockRef<'a, T::EthSpec>, + block: BeaconBlockRef<'_, T::EthSpec>, allow_optimistic_import: AllowOptimisticImport, ) -> Result<(), BlockError> { let spec = &chain.spec; diff --git a/beacon_node/beacon_chain/src/graffiti_calculator.rs b/beacon_node/beacon_chain/src/graffiti_calculator.rs index 4373164d62..8692d374ed 100644 --- a/beacon_node/beacon_chain/src/graffiti_calculator.rs +++ b/beacon_node/beacon_chain/src/graffiti_calculator.rs @@ -293,10 +293,7 @@ mod tests { .await .unwrap(); - let version_bytes = std::cmp::min( - lighthouse_version::VERSION.as_bytes().len(), - GRAFFITI_BYTES_LEN, - ); + let version_bytes = std::cmp::min(lighthouse_version::VERSION.len(), GRAFFITI_BYTES_LEN); // grab the slice of the graffiti that corresponds to the lighthouse version let graffiti_slice = &harness.chain.graffiti_calculator.get_graffiti(None).await.0[..version_bytes]; @@ -361,7 +358,7 @@ mod tests { let graffiti_str = "nice graffiti bro"; let mut graffiti_bytes = [0u8; GRAFFITI_BYTES_LEN]; - graffiti_bytes[..graffiti_str.as_bytes().len()].copy_from_slice(graffiti_str.as_bytes()); + graffiti_bytes[..graffiti_str.len()].copy_from_slice(graffiti_str.as_bytes()); let found_graffiti = harness .chain diff --git a/beacon_node/beacon_chain/src/observed_aggregates.rs b/beacon_node/beacon_chain/src/observed_aggregates.rs index dec012fb92..20ed36ace7 100644 --- a/beacon_node/beacon_chain/src/observed_aggregates.rs +++ b/beacon_node/beacon_chain/src/observed_aggregates.rs @@ -293,7 +293,7 @@ impl SlotHashSet { Ok(self .map .get(&root) - .map_or(false, |agg| agg.iter().any(|val| item.is_subset(val)))) + .is_some_and(|agg| agg.iter().any(|val| item.is_subset(val)))) } /// The number of observed items in `self`. 
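The signature changes to `notify_new_payload` and `validate_merge_block` above come from the same Rust 1.84 clippy pass (presumably `clippy::needless_lifetimes`): a lifetime parameter that is used in only one place can be elided. As a sketch with stand-in names, not a line from the diff:

    // before: `'a` is named but appears in a single argument
    fn process<'a, E: EthSpec>(block: BeaconBlockRef<'a, E>) { /* ... */ }
    // after: the anonymous lifetime expresses the same thing
    fn process<E: EthSpec>(block: BeaconBlockRef<'_, E>) { /* ... */ }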
diff --git a/beacon_node/beacon_chain/src/observed_attesters.rs b/beacon_node/beacon_chain/src/observed_attesters.rs index efb95f57a9..5bba8e4d8e 100644 --- a/beacon_node/beacon_chain/src/observed_attesters.rs +++ b/beacon_node/beacon_chain/src/observed_attesters.rs @@ -130,7 +130,7 @@ impl Item<()> for EpochBitfield { fn get(&self, validator_index: usize) -> Option<()> { self.bitfield .get(validator_index) - .map_or(false, |bit| *bit) + .is_some_and(|bit| *bit) .then_some(()) } } @@ -336,7 +336,7 @@ impl, E: EthSpec> AutoPruningEpochContainer { let exists = self .items .get(&epoch) - .map_or(false, |item| item.get(validator_index).is_some()); + .is_some_and(|item| item.get(validator_index).is_some()); Ok(exists) } diff --git a/beacon_node/beacon_chain/src/observed_data_sidecars.rs b/beacon_node/beacon_chain/src/observed_data_sidecars.rs index 53f8c71f54..a9f4664064 100644 --- a/beacon_node/beacon_chain/src/observed_data_sidecars.rs +++ b/beacon_node/beacon_chain/src/observed_data_sidecars.rs @@ -118,7 +118,7 @@ impl ObservedDataSidecars { slot: data_sidecar.slot(), proposer: data_sidecar.block_proposer_index(), }) - .map_or(false, |indices| indices.contains(&data_sidecar.index())); + .is_some_and(|indices| indices.contains(&data_sidecar.index())); Ok(is_known) } diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index da1d60db17..67ca72254b 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -253,7 +253,7 @@ impl BlockShufflingIds { } else if self .previous .as_ref() - .map_or(false, |id| id.shuffling_epoch == epoch) + .is_some_and(|id| id.shuffling_epoch == epoch) { self.previous.clone() } else if epoch == self.next.shuffling_epoch { diff --git a/beacon_node/beacon_processor/src/lib.rs b/beacon_node/beacon_processor/src/lib.rs index 2a69b04c91..0edda2f95b 100644 --- a/beacon_node/beacon_processor/src/lib.rs +++ b/beacon_node/beacon_processor/src/lib.rs @@ -1022,7 +1022,7 @@ impl BeaconProcessor { let can_spawn = self.current_workers < self.config.max_workers; let drop_during_sync = work_event .as_ref() - .map_or(false, |event| event.drop_during_sync); + .is_some_and(|event| event.drop_during_sync); let idle_tx = idle_tx.clone(); let modified_queue_id = match work_event { diff --git a/beacon_node/client/src/builder.rs b/beacon_node/client/src/builder.rs index 7c6a253aca..24c6615822 100644 --- a/beacon_node/client/src/builder.rs +++ b/beacon_node/client/src/builder.rs @@ -910,7 +910,7 @@ where .forkchoice_update_parameters(); if params .head_hash - .map_or(false, |hash| hash != ExecutionBlockHash::zero()) + .is_some_and(|hash| hash != ExecutionBlockHash::zero()) { // Spawn a new task to update the EE without waiting for it to complete. 
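            // (Continuing the guard above: a zero `head_hash` is fork choice's placeholder
            // for a head block without a real execution payload, so in that case there is
            // nothing to send to the execution engine.)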
let inner_chain = beacon_chain.clone(); diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index f686c2c650..e88803e94f 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -197,7 +197,7 @@ pub fn spawn_notifier( ); let speed = speedo.slots_per_second(); - let display_speed = speed.map_or(false, |speed| speed != 0.0); + let display_speed = speed.is_some_and(|speed| speed != 0.0); if display_speed { info!( @@ -233,7 +233,7 @@ pub fn spawn_notifier( ); let speed = speedo.slots_per_second(); - let display_speed = speed.map_or(false, |speed| speed != 0.0); + let display_speed = speed.is_some_and(|speed| speed != 0.0); if display_speed { info!( @@ -339,9 +339,7 @@ async fn bellatrix_readiness_logging( .message() .body() .execution_payload() - .map_or(false, |payload| { - payload.parent_hash() != ExecutionBlockHash::zero() - }); + .is_ok_and(|payload| payload.parent_hash() != ExecutionBlockHash::zero()); let has_execution_layer = beacon_chain.execution_layer.is_some(); diff --git a/beacon_node/execution_layer/src/engine_api/http.rs b/beacon_node/execution_layer/src/engine_api/http.rs index 33dc60d037..e2a81c072c 100644 --- a/beacon_node/execution_layer/src/engine_api/http.rs +++ b/beacon_node/execution_layer/src/engine_api/http.rs @@ -158,9 +158,7 @@ pub mod deposit_log { }; let signature_is_valid = deposit_pubkey_signature_message(&deposit_data, spec) - .map_or(false, |(public_key, signature, msg)| { - signature.verify(&public_key, msg) - }); + .is_some_and(|(public_key, signature, msg)| signature.verify(&public_key, msg)); Ok(DepositLog { deposit_data, @@ -592,7 +590,7 @@ impl CachedResponse { /// returns `true` if the entry's age is >= age_limit pub fn older_than(&self, age_limit: Option) -> bool { - age_limit.map_or(false, |limit| self.age() >= limit) + age_limit.is_some_and(|limit| self.age() >= limit) } } @@ -720,9 +718,9 @@ impl HttpJsonRpc { .await } - pub async fn get_block_by_number<'a>( + pub async fn get_block_by_number( &self, - query: BlockByNumberQuery<'a>, + query: BlockByNumberQuery<'_>, ) -> Result, Error> { let params = json!([query, RETURN_FULL_TRANSACTION_OBJECTS]); diff --git a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index ae0dca9833..f3b12b21d1 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -2095,7 +2095,7 @@ fn verify_builder_bid( payload: header.timestamp(), expected: payload_attributes.timestamp(), })) - } else if block_number.map_or(false, |n| n != header.block_number()) { + } else if block_number.is_some_and(|n| n != header.block_number()) { Err(Box::new(InvalidBuilderPayload::BlockNumber { payload: header.block_number(), expected: block_number, diff --git a/beacon_node/execution_layer/src/payload_status.rs b/beacon_node/execution_layer/src/payload_status.rs index 5405fd7009..cf0be8ed0d 100644 --- a/beacon_node/execution_layer/src/payload_status.rs +++ b/beacon_node/execution_layer/src/payload_status.rs @@ -41,7 +41,7 @@ pub fn process_payload_status( PayloadStatusV1Status::Valid => { if response .latest_valid_hash - .map_or(false, |h| h == head_block_hash) + .is_some_and(|h| h == head_block_hash) { // The response is only valid if `latest_valid_hash` is not `null` and // equal to the provided `block_hash`. 
diff --git a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs index 48372a39be..dc90d91c0f 100644 --- a/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs +++ b/beacon_node/execution_layer/src/test_utils/mock_execution_layer.rs @@ -318,7 +318,7 @@ impl MockExecutionLayer { (self, block_hash) } - pub async fn with_terminal_block<'a, U, V>(self, func: U) -> Self + pub async fn with_terminal_block(self, func: U) -> Self where U: Fn(ChainSpec, ExecutionLayer, Option) -> V, V: Future, diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index 3981833a5c..b5f4bd50ee 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -270,7 +270,7 @@ impl Eth1GenesisService { // Ignore any block that has already been processed or update the highest processed // block. - if highest_processed_block.map_or(false, |highest| highest >= block.number) { + if highest_processed_block.is_some_and(|highest| highest >= block.number) { continue; } else { self.stats diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index 23d177da78..febdf69259 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1164,7 +1164,7 @@ pub fn serve( .map_err(warp_utils::reject::beacon_chain_error)? // Ignore any skip-slots immediately following the parent. .find(|res| { - res.as_ref().map_or(false, |(root, _)| *root != parent_root) + res.as_ref().is_ok_and(|(root, _)| *root != parent_root) }) .transpose() .map_err(warp_utils::reject::beacon_chain_error)? @@ -1249,7 +1249,7 @@ pub fn serve( let canonical = chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) .map_err(warp_utils::reject::beacon_chain_error)? 
- .map_or(false, |canonical| root == canonical); + .is_some_and(|canonical| root == canonical); let data = api_types::BlockHeaderData { root, diff --git a/beacon_node/http_api/src/validator.rs b/beacon_node/http_api/src/validator.rs index 7f11ddd8f4..baa41e33ed 100644 --- a/beacon_node/http_api/src/validator.rs +++ b/beacon_node/http_api/src/validator.rs @@ -14,7 +14,7 @@ pub fn pubkey_to_validator_index( state .validators() .get(index) - .map_or(false, |v| v.pubkey == *pubkey) + .is_some_and(|v| v.pubkey == *pubkey) }) .map(Result::Ok) .transpose() diff --git a/beacon_node/http_api/tests/interactive_tests.rs b/beacon_node/http_api/tests/interactive_tests.rs index e45dcf221c..8cfcf5d93e 100644 --- a/beacon_node/http_api/tests/interactive_tests.rs +++ b/beacon_node/http_api/tests/interactive_tests.rs @@ -161,7 +161,7 @@ impl ForkChoiceUpdates { update .payload_attributes .as_ref() - .map_or(false, |payload_attributes| { + .is_some_and(|payload_attributes| { payload_attributes.timestamp() == proposal_timestamp }) }) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 7007a14466..1efe44a613 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -1278,7 +1278,7 @@ impl ApiTester { .chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) .unwrap() - .map_or(false, |canonical| block_root == canonical); + .is_some_and(|canonical| block_root == canonical); assert_eq!(result.canonical, canonical, "{:?}", block_id); assert_eq!(result.root, block_root, "{:?}", block_id); diff --git a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs index 537d2319c2..0d77e2cd0f 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/backoff.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/backoff.rs @@ -124,7 +124,7 @@ impl BackoffStorage { pub(crate) fn is_backoff_with_slack(&self, topic: &TopicHash, peer: &PeerId) -> bool { self.backoffs .get(topic) - .map_or(false, |m| m.contains_key(peer)) + .is_some_and(|m| m.contains_key(peer)) } pub(crate) fn get_backoff_time(&self, topic: &TopicHash, peer: &PeerId) -> Option { diff --git a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs index c4e20e4397..6528e737a3 100644 --- a/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs +++ b/beacon_node/lighthouse_network/gossipsub/src/behaviour.rs @@ -1770,8 +1770,7 @@ where // reject messages claiming to be from ourselves but not locally published let self_published = !self.config.allow_self_origin() && if let Some(own_id) = self.publish_config.get_own_id() { - own_id != propagation_source - && raw_message.source.as_ref().map_or(false, |s| s == own_id) + own_id != propagation_source && raw_message.source.as_ref() == Some(own_id) } else { self.published_message_ids.contains(msg_id) }; diff --git a/beacon_node/lighthouse_network/src/config.rs b/beacon_node/lighthouse_network/src/config.rs index 21f3dc830f..8a93b1185d 100644 --- a/beacon_node/lighthouse_network/src/config.rs +++ b/beacon_node/lighthouse_network/src/config.rs @@ -166,7 +166,7 @@ impl Config { tcp_port, }); self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), disc_port); - self.discv5_config.table_filter = |enr| enr.ip4().as_ref().map_or(false, is_global_ipv4) + self.discv5_config.table_filter = |enr| enr.ip4().as_ref().is_some_and(is_global_ipv4) } /// Sets the listening address to use an ipv6 address. 
The discv5 ip_mode and table filter is @@ -187,7 +187,7 @@ impl Config { }); self.discv5_config.listen_config = discv5::ListenConfig::from_ip(addr.into(), disc_port); - self.discv5_config.table_filter = |enr| enr.ip6().as_ref().map_or(false, is_global_ipv6) + self.discv5_config.table_filter = |enr| enr.ip6().as_ref().is_some_and(is_global_ipv6) } /// Sets the listening address to use both an ipv4 and ipv6 address. The discv5 ip_mode and @@ -317,7 +317,7 @@ impl Default for Config { .filter_rate_limiter(filter_rate_limiter) .filter_max_bans_per_ip(Some(5)) .filter_max_nodes_per_ip(Some(10)) - .table_filter(|enr| enr.ip4().map_or(false, |ip| is_global_ipv4(&ip))) // Filter non-global IPs + .table_filter(|enr| enr.ip4().is_some_and(|ip| is_global_ipv4(&ip))) // Filter non-global IPs .ban_duration(Some(Duration::from_secs(3600))) .ping_interval(Duration::from_secs(300)) .build(); diff --git a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs index 02ff0cc3ca..751f8dbb83 100644 --- a/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs +++ b/beacon_node/lighthouse_network/src/discovery/subnet_predicate.rs @@ -35,7 +35,7 @@ where .unwrap_or(false), Subnet::SyncCommittee(s) => sync_committee_bitfield .as_ref() - .map_or(false, |b| b.get(*s.deref() as usize).unwrap_or(false)), + .is_ok_and(|b| b.get(*s.deref() as usize).unwrap_or(false)), Subnet::DataColumn(s) => { if let Ok(custody_subnet_count) = enr.custody_subnet_count::(&spec) { DataColumnSubnetId::compute_custody_subnets::( @@ -43,7 +43,7 @@ where custody_subnet_count, &spec, ) - .map_or(false, |mut subnets| subnets.contains(s)) + .is_ok_and(|mut subnets| subnets.contains(s)) } else { false } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs index d2effd4d03..22a3df1ae8 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb.rs @@ -1305,7 +1305,7 @@ impl BannedPeersCount { pub fn ip_is_banned(&self, ip: &IpAddr) -> bool { self.banned_peers_per_ip .get(ip) - .map_or(false, |count| *count > BANNED_PEERS_PER_IP_THRESHOLD) + .is_some_and(|count| *count > BANNED_PEERS_PER_IP_THRESHOLD) } } diff --git a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs index ee8c27f474..27c8463a55 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/peerdb/peer_info.rs @@ -99,7 +99,7 @@ impl PeerInfo { Subnet::SyncCommittee(id) => { return meta_data .syncnets() - .map_or(false, |s| s.get(**id as usize).unwrap_or(false)) + .is_ok_and(|s| s.get(**id as usize).unwrap_or(false)) } Subnet::DataColumn(column) => return self.custody_subnets.contains(column), } @@ -264,7 +264,7 @@ impl PeerInfo { /// Reports if this peer has some future validator duty in which case it is valuable to keep it. pub fn has_future_duty(&self) -> bool { - self.min_ttl.map_or(false, |i| i >= Instant::now()) + self.min_ttl.is_some_and(|i| i >= Instant::now()) } /// Returns score of the peer. 
diff --git a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs index f3c48e42f0..6b5753e96a 100644 --- a/beacon_node/network/src/network_beacon_processor/gossip_methods.rs +++ b/beacon_node/network/src/network_beacon_processor/gossip_methods.rs @@ -3129,7 +3129,7 @@ impl NetworkBeaconProcessor { .chain .slot_clock .now() - .map_or(false, |current_slot| sync_message_slot == current_slot); + .is_some_and(|current_slot| sync_message_slot == current_slot); self.propagate_if_timely(is_timely, message_id, peer_id) } diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index d01c73118c..d8183de752 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -186,7 +186,7 @@ impl OperationPool { self.sync_contributions.write().retain(|_, contributions| { // All the contributions in this bucket have the same data, so we only need to // check the first one. - contributions.first().map_or(false, |contribution| { + contributions.first().is_some_and(|contribution| { current_slot <= contribution.slot.saturating_add(Slot::new(1)) }) }); @@ -401,7 +401,7 @@ impl OperationPool { && state .validators() .get(slashing.as_inner().signed_header_1.message.proposer_index as usize) - .map_or(false, |validator| !validator.slashed) + .is_some_and(|validator| !validator.slashed) }, |slashing| slashing.as_inner().clone(), E::MaxProposerSlashings::to_usize(), @@ -484,7 +484,7 @@ impl OperationPool { validator.exit_epoch > head_state.finalized_checkpoint().epoch }, ) - .map_or(false, |indices| !indices.is_empty()); + .is_ok_and(|indices| !indices.is_empty()); signature_ok && slashing_ok }); @@ -583,9 +583,7 @@ impl OperationPool { address_change.signature_is_still_valid(&state.fork()) && state .get_validator(address_change.as_inner().message.validator_index as usize) - .map_or(false, |validator| { - !validator.has_execution_withdrawal_credential(spec) - }) + .is_ok_and(|validator| !validator.has_execution_withdrawal_credential(spec)) }, |address_change| address_change.as_inner().clone(), E::MaxBlsToExecutionChanges::to_usize(), @@ -609,9 +607,7 @@ impl OperationPool { address_change.signature_is_still_valid(&state.fork()) && state .get_validator(address_change.as_inner().message.validator_index as usize) - .map_or(false, |validator| { - !validator.has_eth1_withdrawal_credential(spec) - }) + .is_ok_and(|validator| !validator.has_eth1_withdrawal_credential(spec)) }, |address_change| address_change.as_inner().clone(), usize::MAX, diff --git a/beacon_node/store/src/forwards_iter.rs b/beacon_node/store/src/forwards_iter.rs index 27769a310a..955bd33b30 100644 --- a/beacon_node/store/src/forwards_iter.rs +++ b/beacon_node/store/src/forwards_iter.rs @@ -265,7 +265,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> // `end_slot`. If it tries to continue further a `NoContinuationData` error will be // returned. let continuation_data = - if end_slot.map_or(false, |end_slot| end_slot < freezer_upper_bound) { + if end_slot.is_some_and(|end_slot| end_slot < freezer_upper_bound) { None } else { Some(Box::new(get_state()?)) @@ -306,7 +306,7 @@ impl<'a, E: EthSpec, Hot: ItemStore, Cold: ItemStore> None => { // If the iterator has an end slot (inclusive) which has already been // covered by the (exclusive) frozen forwards iterator, then we're done! 
- if end_slot.map_or(false, |end_slot| iter.end_slot == end_slot + 1) { + if end_slot.is_some_and(|end_slot| iter.end_slot == end_slot + 1) { *self = Finished; return Ok(None); } diff --git a/beacon_node/store/src/reconstruct.rs b/beacon_node/store/src/reconstruct.rs index 9bec83a35c..2a3b208aae 100644 --- a/beacon_node/store/src/reconstruct.rs +++ b/beacon_node/store/src/reconstruct.rs @@ -111,7 +111,7 @@ where self.store_cold_state(&state_root, &state, &mut io_batch)?; let batch_complete = - num_blocks.map_or(false, |n_blocks| slot == lower_limit_slot + n_blocks as u64); + num_blocks.is_some_and(|n_blocks| slot == lower_limit_slot + n_blocks as u64); let reconstruction_complete = slot + 1 == upper_limit_slot; // Commit the I/O batch if: diff --git a/beacon_node/store/src/state_cache.rs b/beacon_node/store/src/state_cache.rs index 5c1faa7f2f..96e4de4639 100644 --- a/beacon_node/store/src/state_cache.rs +++ b/beacon_node/store/src/state_cache.rs @@ -77,9 +77,7 @@ impl StateCache { if self .finalized_state .as_ref() - .map_or(false, |finalized_state| { - state.slot() < finalized_state.state.slot() - }) + .is_some_and(|finalized_state| state.slot() < finalized_state.state.slot()) { return Err(Error::FinalizedStateDecreasingSlot); } @@ -127,9 +125,7 @@ impl StateCache { if self .finalized_state .as_ref() - .map_or(false, |finalized_state| { - finalized_state.state_root == state_root - }) + .is_some_and(|finalized_state| finalized_state.state_root == state_root) { return Ok(PutStateOutcome::Finalized); } diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index a4850fc1c6..24f6861daa 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -435,7 +435,7 @@ pub fn recursively_find_voting_keystores>( && dir_entry .file_name() .to_str() - .map_or(false, is_voting_keystore) + .is_some_and(is_voting_keystore) { matches.push(dir_entry.path()) } diff --git a/common/logging/src/lib.rs b/common/logging/src/lib.rs index 7fe7f79506..0ddd867c2f 100644 --- a/common/logging/src/lib.rs +++ b/common/logging/src/lib.rs @@ -204,7 +204,7 @@ impl TimeLatch { pub fn elapsed(&mut self) -> bool { let now = Instant::now(); - let is_elapsed = self.0.map_or(false, |elapse_time| now > elapse_time); + let is_elapsed = self.0.is_some_and(|elapse_time| now > elapse_time); if is_elapsed || self.0.is_none() { self.0 = Some(now + LOG_DEBOUNCE_INTERVAL); diff --git a/consensus/proto_array/src/proto_array.rs b/consensus/proto_array/src/proto_array.rs index 38ea141199..5d0bee4c85 100644 --- a/consensus/proto_array/src/proto_array.rs +++ b/consensus/proto_array/src/proto_array.rs @@ -468,7 +468,7 @@ impl ProtoArray { // 1. The `head_block_root` is a descendant of `latest_valid_ancestor_hash` // 2. The `latest_valid_ancestor_hash` is equal to or a descendant of the finalized block. let latest_valid_ancestor_is_descendant = - latest_valid_ancestor_root.map_or(false, |ancestor_root| { + latest_valid_ancestor_root.is_some_and(|ancestor_root| { self.is_descendant(ancestor_root, head_block_root) && self.is_finalized_checkpoint_or_descendant::(ancestor_root) }); @@ -505,13 +505,13 @@ impl ProtoArray { // head. 
if node .best_child - .map_or(false, |i| invalidated_indices.contains(&i)) + .is_some_and(|i| invalidated_indices.contains(&i)) { node.best_child = None } if node .best_descendant - .map_or(false, |i| invalidated_indices.contains(&i)) + .is_some_and(|i| invalidated_indices.contains(&i)) { node.best_descendant = None } @@ -999,7 +999,7 @@ impl ProtoArray { node.unrealized_finalized_checkpoint, node.unrealized_justified_checkpoint, ] { - if checkpoint.map_or(false, |cp| cp == self.finalized_checkpoint) { + if checkpoint.is_some_and(|cp| cp == self.finalized_checkpoint) { return true; } } @@ -1037,7 +1037,7 @@ impl ProtoArray { .find(|node| { node.execution_status .block_hash() - .map_or(false, |node_block_hash| node_block_hash == *block_hash) + .is_some_and(|node_block_hash| node_block_hash == *block_hash) }) .map(|node| node.root) } diff --git a/consensus/state_processing/src/genesis.rs b/consensus/state_processing/src/genesis.rs index 00697def5d..ccff3d80c0 100644 --- a/consensus/state_processing/src/genesis.rs +++ b/consensus/state_processing/src/genesis.rs @@ -53,7 +53,7 @@ pub fn initialize_beacon_state_from_eth1( // https://github.com/ethereum/eth2.0-specs/pull/2323 if spec .altair_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { upgrade_to_altair(&mut state, spec)?; @@ -63,7 +63,7 @@ pub fn initialize_beacon_state_from_eth1( // Similarly, perform an upgrade to the merge if configured from genesis. if spec .bellatrix_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { // this will set state.latest_execution_payload_header = ExecutionPayloadHeaderBellatrix::default() upgrade_to_bellatrix(&mut state, spec)?; @@ -81,7 +81,7 @@ pub fn initialize_beacon_state_from_eth1( // Upgrade to capella if configured from genesis if spec .capella_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { upgrade_to_capella(&mut state, spec)?; @@ -98,7 +98,7 @@ pub fn initialize_beacon_state_from_eth1( // Upgrade to deneb if configured from genesis if spec .deneb_fork_epoch - .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch()) + .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch()) { upgrade_to_deneb(&mut state, spec)?; @@ -115,7 +115,7 @@ pub fn initialize_beacon_state_from_eth1( // Upgrade to electra if configured from genesis. 
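    // (Same pattern as the Altair/Bellatrix/Capella/Deneb upgrades above: every
    // fork scheduled at the genesis epoch is applied in order, letting a network
    // launch directly on a later fork.)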
    if spec
        .electra_fork_epoch
-        .map_or(false, |fork_epoch| fork_epoch == E::genesis_epoch())
+        .is_some_and(|fork_epoch| fork_epoch == E::genesis_epoch())
    {
        let post = upgrade_state_to_electra(&mut state, Epoch::new(0), Epoch::new(0), spec)?;
        state = post;
@@ -153,7 +153,7 @@ pub fn initialize_beacon_state_from_eth1<E: EthSpec>(
 pub fn is_valid_genesis_state<E: EthSpec>(state: &BeaconState<E>, spec: &ChainSpec) -> bool {
    state
        .get_active_validator_indices(E::genesis_epoch(), spec)
-        .map_or(false, |active_validators| {
+        .is_ok_and(|active_validators| {
            state.genesis_time() >= spec.min_genesis_time
                && active_validators.len() as u64 >= spec.min_genesis_active_validator_count
        })
diff --git a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs
index 210db4c9c1..08cfd9cba8 100644
--- a/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs
+++ b/consensus/state_processing/src/per_block_processing/altair/sync_committee.rs
@@ -38,7 +38,7 @@ pub fn process_sync_aggregate<E: EthSpec>(
        )?;

        // If signature set is `None` then the signature is valid (infinity).
-        if signature_set.map_or(false, |signature| !signature.verify()) {
+        if signature_set.is_some_and(|signature| !signature.verify()) {
            return Err(SyncAggregateInvalid::SignatureInvalid.into());
        }
    }
diff --git a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs
index 952ab3f649..5508b80807 100644
--- a/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs
+++ b/consensus/state_processing/src/per_epoch_processing/epoch_processing_summary.rs
@@ -151,7 +151,7 @@ impl<E: EthSpec> EpochProcessingSummary<E> {
        match self {
            EpochProcessingSummary::Base { statuses, .. } => statuses
                .get(val_index)
-                .map_or(false, |s| s.is_active_in_current_epoch && !s.is_slashed),
+                .is_some_and(|s| s.is_active_in_current_epoch && !s.is_slashed),
            EpochProcessingSummary::Altair { participation, .. } => {
                participation.is_active_and_unslashed(val_index, participation.current_epoch)
            }
@@ -176,7 +176,7 @@ impl<E: EthSpec> EpochProcessingSummary<E> {
        match self {
            EpochProcessingSummary::Base { statuses, .. } => Ok(statuses
                .get(val_index)
-                .map_or(false, |s| s.is_current_epoch_target_attester)),
+                .is_some_and(|s| s.is_current_epoch_target_attester)),
            EpochProcessingSummary::Altair { participation, .. } => participation
                .is_current_epoch_unslashed_participating_index(
                    val_index,
@@ -247,7 +247,7 @@ impl<E: EthSpec> EpochProcessingSummary<E> {
        match self {
            EpochProcessingSummary::Base { statuses, .. } => statuses
                .get(val_index)
-                .map_or(false, |s| s.is_active_in_previous_epoch && !s.is_slashed),
+                .is_some_and(|s| s.is_active_in_previous_epoch && !s.is_slashed),
            EpochProcessingSummary::Altair { participation, .. } => {
                participation.is_active_and_unslashed(val_index, participation.previous_epoch)
            }
@@ -267,7 +267,7 @@ impl<E: EthSpec> EpochProcessingSummary<E> {
        match self {
            EpochProcessingSummary::Base { statuses, .. } => Ok(statuses
                .get(val_index)
-                .map_or(false, |s| s.is_previous_epoch_target_attester)),
+                .is_some_and(|s| s.is_previous_epoch_target_attester)),
            EpochProcessingSummary::Altair { participation, .. } => participation
                .is_previous_epoch_unslashed_participating_index(
                    val_index,
@@ -294,7 +294,7 @@ impl<E: EthSpec> EpochProcessingSummary<E> {
        match self {
            EpochProcessingSummary::Base { statuses, ..
} => Ok(statuses
                .get(val_index)
-                .map_or(false, |s| s.is_previous_epoch_head_attester)),
+                .is_some_and(|s| s.is_previous_epoch_head_attester)),
            EpochProcessingSummary::Altair { participation, .. } => participation
                .is_previous_epoch_unslashed_participating_index(val_index, TIMELY_HEAD_FLAG_INDEX),
        }
@@ -318,7 +318,7 @@ impl<E: EthSpec> EpochProcessingSummary<E> {
        match self {
            EpochProcessingSummary::Base { statuses, .. } => Ok(statuses
                .get(val_index)
-                .map_or(false, |s| s.is_previous_epoch_attester)),
+                .is_some_and(|s| s.is_previous_epoch_attester)),
            EpochProcessingSummary::Altair { participation, .. } => participation
                .is_previous_epoch_unslashed_participating_index(
                    val_index,
diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs
index b896dc4693..f7a701fed6 100644
--- a/consensus/types/src/beacon_block_body.rs
+++ b/consensus/types/src/beacon_block_body.rs
@@ -283,7 +283,7 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload<E>> BeaconBlockBodyRef<'a, E,
    /// Return `true` if this block body has a non-zero number of blobs.
    pub fn has_blobs(self) -> bool {
        self.blob_kzg_commitments()
-            .map_or(false, |blobs| !blobs.is_empty())
+            .is_ok_and(|blobs| !blobs.is_empty())
    }

    pub fn attestations_len(&self) -> usize {
diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs
index ad4484b86a..05f28744fa 100644
--- a/consensus/types/src/beacon_state.rs
+++ b/consensus/types/src/beacon_state.rs
@@ -1856,7 +1856,7 @@ impl<E: EthSpec> BeaconState<E> {
    pub fn committee_cache_is_initialized(&self, relative_epoch: RelativeEpoch) -> bool {
        let i = Self::committee_cache_index(relative_epoch);

-        self.committee_cache_at_index(i).map_or(false, |cache| {
+        self.committee_cache_at_index(i).is_ok_and(|cache| {
            cache.is_initialized_at(relative_epoch.into_epoch(self.current_epoch()))
        })
    }
diff --git a/consensus/types/src/beacon_state/progressive_balances_cache.rs b/consensus/types/src/beacon_state/progressive_balances_cache.rs
index fd5e51313f..bc258ef68d 100644
--- a/consensus/types/src/beacon_state/progressive_balances_cache.rs
+++ b/consensus/types/src/beacon_state/progressive_balances_cache.rs
@@ -145,7 +145,7 @@ impl ProgressiveBalancesCache {
    pub fn is_initialized_at(&self, epoch: Epoch) -> bool {
        self.inner
            .as_ref()
-            .map_or(false, |inner| inner.current_epoch == epoch)
+            .is_some_and(|inner| inner.current_epoch == epoch)
    }

    /// When a new target attestation has been processed, we update the cached
diff --git a/consensus/types/src/chain_spec.rs b/consensus/types/src/chain_spec.rs
index 0b33a76ff1..9d3308cf23 100644
--- a/consensus/types/src/chain_spec.rs
+++ b/consensus/types/src/chain_spec.rs
@@ -420,16 +420,14 @@ impl ChainSpec {
    /// Returns true if the given epoch is greater than or equal to the `EIP7594_FORK_EPOCH`.
    pub fn is_peer_das_enabled_for_epoch(&self, block_epoch: Epoch) -> bool {
-        self.eip7594_fork_epoch.map_or(false, |eip7594_fork_epoch| {
-            block_epoch >= eip7594_fork_epoch
-        })
+        self.eip7594_fork_epoch
+            .is_some_and(|eip7594_fork_epoch| block_epoch >= eip7594_fork_epoch)
    }

    /// Returns true if `EIP7594_FORK_EPOCH` is set and is not set to `FAR_FUTURE_EPOCH`.
    pub fn is_peer_das_scheduled(&self) -> bool {
-        self.eip7594_fork_epoch.map_or(false, |eip7594_fork_epoch| {
-            eip7594_fork_epoch != self.far_future_epoch
-        })
+        self.eip7594_fork_epoch
+            .is_some_and(|eip7594_fork_epoch| eip7594_fork_epoch != self.far_future_epoch)
    }

    /// Returns a full `Fork` struct for a given epoch.
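NOTE: the hunks above and below all apply one mechanical rewrite: `Option::map_or(false, f)` becomes `Option::is_some_and(f)`, and `Result::map_or(false, f)` becomes `Result::is_ok_and(f)`. Both methods have been stable since Rust 1.70, and this is presumably the spelling that Clippy's newer `unnecessary_map_or` lint nudges toward. The forms are equivalent whenever the default is a plain `false` literal. A minimal, self-contained sketch of the equivalence; the values and predicates are illustrative only, not taken from the patches:

    fn main() {
        // `Option`: `None` yields `false`; `Some(v)` applies the predicate.
        let fork_epoch: Option<u64> = Some(0);
        assert_eq!(
            fork_epoch.map_or(false, |e| e == 0), // old spelling
            fork_epoch.is_some_and(|e| e == 0),   // new spelling
        );

        // `Result`: `Err(_)` yields `false`; `Ok(v)` applies the predicate.
        let active_validators: Result<Vec<u64>, String> = Ok(vec![1, 2, 3]);
        assert!(active_validators.is_ok_and(|v| v.len() >= 3));
    }

In both spellings the closure runs only on `Some`/`Ok`, so this is a pure renaming with no change in evaluation order or behaviour.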
diff --git a/consensus/types/src/deposit_tree_snapshot.rs b/consensus/types/src/deposit_tree_snapshot.rs
index df1064daba..2f9df8758b 100644
--- a/consensus/types/src/deposit_tree_snapshot.rs
+++ b/consensus/types/src/deposit_tree_snapshot.rs
@@ -72,8 +72,7 @@ impl DepositTreeSnapshot {
        Some(Hash256::from_slice(&deposit_root))
    }
    pub fn is_valid(&self) -> bool {
-        self.calculate_root()
-            .map_or(false, |calculated| self.deposit_root == calculated)
+        self.calculate_root() == Some(self.deposit_root)
    }
}
diff --git a/consensus/types/src/graffiti.rs b/consensus/types/src/graffiti.rs
index 08f8573c6d..f781aacabd 100644
--- a/consensus/types/src/graffiti.rs
+++ b/consensus/types/src/graffiti.rs
@@ -57,7 +57,7 @@ impl FromStr for GraffitiString {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        if s.as_bytes().len() > GRAFFITI_BYTES_LEN {
+        if s.len() > GRAFFITI_BYTES_LEN {
            return Err(format!(
                "Graffiti exceeds max length {}",
                GRAFFITI_BYTES_LEN
diff --git a/lcli/src/transition_blocks.rs b/lcli/src/transition_blocks.rs
index 94d95a0d1c..ecfa04fc81 100644
--- a/lcli/src/transition_blocks.rs
+++ b/lcli/src/transition_blocks.rs
@@ -223,7 +223,7 @@ pub fn run<E: EthSpec>(
        .update_tree_hash_cache()
        .map_err(|e| format!("Unable to build THC: {:?}", e))?;

-    if state_root_opt.map_or(false, |expected| expected != state_root) {
+    if state_root_opt.is_some_and(|expected| expected != state_root) {
        return Err(format!(
            "State root mismatch! Expected {}, computed {}",
            state_root_opt.unwrap(),
@@ -331,7 +331,7 @@ fn do_transition<E: EthSpec>(
        .map_err(|e| format!("Unable to build tree hash cache: {:?}", e))?;
    debug!("Initial tree hash: {:?}", t.elapsed());

-    if state_root_opt.map_or(false, |expected| expected != state_root) {
+    if state_root_opt.is_some_and(|expected| expected != state_root) {
        return Err(format!(
            "State root mismatch! Expected {}, computed {}",
            state_root_opt.unwrap(),
diff --git a/slasher/src/database.rs b/slasher/src/database.rs
index 20b4a33771..e2b49dca29 100644
--- a/slasher/src/database.rs
+++ b/slasher/src/database.rs
@@ -406,7 +406,7 @@ impl<E: EthSpec> SlasherDB<E> {
    ) -> Result<(), Error> {
        // Don't update maximum if new target is less than or equal to previous. In the case of
        // no previous we *do* want to update.
-        if previous_max_target.map_or(false, |prev_max| max_target <= prev_max) {
+        if previous_max_target.is_some_and(|prev_max| max_target <= prev_max) {
            return Ok(());
        }
diff --git a/testing/ef_tests/src/cases/fork_choice.rs b/testing/ef_tests/src/cases/fork_choice.rs
index 427bcf5e9c..a1c74389a7 100644
--- a/testing/ef_tests/src/cases/fork_choice.rs
+++ b/testing/ef_tests/src/cases/fork_choice.rs
@@ -523,7 +523,7 @@ impl<E: EthSpec> Tester<E> {
                || Ok(()),
            ))?
            .map(|avail: AvailabilityProcessingStatus| avail.try_into());
-        let success = blob_success && result.as_ref().map_or(false, |inner| inner.is_ok());
+        let success = blob_success && result.as_ref().is_ok_and(|inner| inner.is_ok());
        if success != valid {
            return Err(Error::DidntFail(format!(
                "block with root {} was valid={} whilst test expects valid={}.
result: {:?}",
diff --git a/testing/ef_tests/src/cases/operations.rs b/testing/ef_tests/src/cases/operations.rs
index 54ca52447f..d8cade296b 100644
--- a/testing/ef_tests/src/cases/operations.rs
+++ b/testing/ef_tests/src/cases/operations.rs
@@ -322,7 +322,7 @@ impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, FullPayload<E>> {
        let valid = extra
            .execution_metadata
            .as_ref()
-            .map_or(false, |e| e.execution_valid);
+            .is_some_and(|e| e.execution_valid);
        if valid {
            process_execution_payload::<E, FullPayload<E>>(state, self.to_ref(), spec)
        } else {
@@ -377,7 +377,7 @@ impl<E: EthSpec> Operation<E> for BeaconBlockBody<E, BlindedPayload<E>> {
        let valid = extra
            .execution_metadata
            .as_ref()
-            .map_or(false, |e| e.execution_valid);
+            .is_some_and(|e| e.execution_valid);
        if valid {
            process_execution_payload::<E, BlindedPayload<E>>(state, self.to_ref(), spec)
        } else {
diff --git a/testing/ef_tests/src/decode.rs b/testing/ef_tests/src/decode.rs
index 757b9bf3c4..eb88ac6af1 100644
--- a/testing/ef_tests/src/decode.rs
+++ b/testing/ef_tests/src/decode.rs
@@ -28,7 +28,7 @@ pub fn log_file_access<P: AsRef<Path>>(file_accessed: P) {
        writeln!(&mut file, "{:?}", file_accessed.as_ref()).expect("should write to file");

-    file.unlock().expect("unable to unlock file");
+    fs2::FileExt::unlock(&file).expect("unable to unlock file");
}

pub fn yaml_decode<T: serde::de::DeserializeOwned>(string: &str) -> Result<T, Error> {
diff --git a/validator_client/doppelganger_service/src/lib.rs b/validator_client/doppelganger_service/src/lib.rs
index 35228fe354..4a593c2700 100644
--- a/validator_client/doppelganger_service/src/lib.rs
+++ b/validator_client/doppelganger_service/src/lib.rs
@@ -162,7 +162,7 @@ impl DoppelgangerState {
 /// If the BN fails to respond to either of these requests, simply return an empty response.
 /// This behaviour is to help prevent spurious failures on the BN from needlessly preventing
 /// doppelganger progression.
-async fn beacon_node_liveness<'a, T: 'static + SlotClock, E: EthSpec>(
+async fn beacon_node_liveness<T: 'static + SlotClock, E: EthSpec>(
    beacon_nodes: Arc<BeaconNodeFallback<T, E>>,
    log: Logger,
    current_epoch: Epoch,
diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs
index baaf930c68..71611339f9 100644
--- a/validator_client/slashing_protection/src/slashing_database.rs
+++ b/validator_client/slashing_protection/src/slashing_database.rs
@@ -1113,9 +1113,7 @@ fn max_or<T: Ord>(opt_x: Option<T>, y: T) -> T {
 ///
 /// If prev is `None` and `new` is `Some` then `true` is returned.
 fn monotonic<T: PartialOrd>(new: Option<T>, prev: Option<T>) -> bool {
-    new.map_or(false, |new_val| {
-        prev.map_or(true, |prev_val| new_val >= prev_val)
-    })
+    new.is_some_and(|new_val| prev.map_or(true, |prev_val| new_val >= prev_val))
}

/// The result of importing a single entry from an interchange file.
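NOTE on the `ef_tests` decode hunk above: `std::fs::File` is growing its own built-in file-locking methods, so a bare `file.unlock()` call now trips rustc's unstable-name-collision warning against the `fs2::FileExt` trait method of the same name; spelling the call as `fs2::FileExt::unlock(&file)` pins it to the trait. A small sketch of the pattern, assuming the `fs2` crate as a dependency (the file path and log line are placeholders, not from the patch):

    use fs2::FileExt;
    use std::io::Write;

    fn append_line_locked(path: &str, line: &str) -> std::io::Result<()> {
        let mut file = std::fs::File::options().create(true).append(true).open(path)?;
        // Trait method from `fs2`; no std method currently collides with this name.
        file.lock_exclusive()?;
        writeln!(&mut file, "{line}")?;
        // Fully qualified call: stays unambiguous even once `File::unlock`
        // lands in the standard library.
        fs2::FileExt::unlock(&file)
    }

    fn main() -> std::io::Result<()> {
        append_line_locked("accessed_files.txt", "/tmp/example.yaml")
    }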
diff --git a/validator_client/validator_services/src/preparation_service.rs b/validator_client/validator_services/src/preparation_service.rs
index 480f4af2b3..fe6eab3a8a 100644
--- a/validator_client/validator_services/src/preparation_service.rs
+++ b/validator_client/validator_services/src/preparation_service.rs
@@ -258,7 +258,7 @@ impl<T: SlotClock + 'static, E: EthSpec> PreparationService<T, E> {
            .slot_clock
            .now()
            .map_or(E::genesis_epoch(), |slot| slot.epoch(E::slots_per_epoch()));
-        spec.bellatrix_fork_epoch.map_or(false, |fork_epoch| {
+        spec.bellatrix_fork_epoch.is_some_and(|fork_epoch| {
            current_epoch + PROPOSER_PREPARATION_LOOKAHEAD_EPOCHS >= fork_epoch
        })
    }
diff --git a/validator_client/validator_services/src/sync.rs b/validator_client/validator_services/src/sync.rs
index af501326f4..dd3e05088e 100644
--- a/validator_client/validator_services/src/sync.rs
+++ b/validator_client/validator_services/src/sync.rs
@@ -94,7 +94,7 @@ impl<E: EthSpec> SyncDutiesMap<E> {
        self.committees
            .read()
            .get(&committee_period)
-            .map_or(false, |committee_duties| {
+            .is_some_and(|committee_duties| {
                let validator_duties = committee_duties.validators.read();
                validator_indices
                    .iter()
diff --git a/validator_manager/src/create_validators.rs b/validator_manager/src/create_validators.rs
index d4403b4613..b40fe61a82 100644
--- a/validator_manager/src/create_validators.rs
+++ b/validator_manager/src/create_validators.rs
@@ -286,7 +286,7 @@ struct ValidatorsAndDeposits {
}

impl ValidatorsAndDeposits {
-    async fn new<'a, E: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result<Self, String> {
+    async fn new<E: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result<Self, String> {
        let CreateConfig {
            // The output path is handled upstream.
            output_path: _,
@@ -545,7 +545,7 @@ pub async fn cli_run<E: EthSpec>(
        }
    }
}

-async fn run<'a, E: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result<(), String> {
+async fn run<E: EthSpec>(config: CreateConfig, spec: &ChainSpec) -> Result<(), String> {
    let output_path = config.output_path.clone();

    if !output_path.exists() {
diff --git a/validator_manager/src/delete_validators.rs b/validator_manager/src/delete_validators.rs
index a2d6c062fa..5ef647c5af 100644
--- a/validator_manager/src/delete_validators.rs
+++ b/validator_manager/src/delete_validators.rs
@@ -86,7 +86,7 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<()
    }
}

-async fn run<'a>(config: DeleteConfig) -> Result<(), String> {
+async fn run(config: DeleteConfig) -> Result<(), String> {
    let DeleteConfig {
        vc_url,
        vc_token_path,
diff --git a/validator_manager/src/import_validators.rs b/validator_manager/src/import_validators.rs
index 3cebc10bb3..63c7ca4596 100644
--- a/validator_manager/src/import_validators.rs
+++ b/validator_manager/src/import_validators.rs
@@ -209,7 +209,7 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<()
    }
}

-async fn run<'a>(config: ImportConfig) -> Result<(), String> {
+async fn run(config: ImportConfig) -> Result<(), String> {
    let ImportConfig {
        validators_file_path,
        keystore_file_path,
diff --git a/validator_manager/src/list_validators.rs b/validator_manager/src/list_validators.rs
index e3deb0b21a..a0a1c5fb40 100644
--- a/validator_manager/src/list_validators.rs
+++ b/validator_manager/src/list_validators.rs
@@ -58,7 +58,7 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<()
    }
}

-async fn run<'a>(config: ListConfig) -> Result<Vec<ValidatorData>, String> {
+async fn run(config: ListConfig) -> Result<Vec<ValidatorData>, String> {
    let ListConfig {
        vc_url,
        vc_token_path,
diff --git a/validator_manager/src/move_validators.rs
b/validator_manager/src/move_validators.rs index 4d0820f5a8..abac071673 100644 --- a/validator_manager/src/move_validators.rs +++ b/validator_manager/src/move_validators.rs @@ -277,7 +277,7 @@ pub async fn cli_run(matches: &ArgMatches, dump_config: DumpConfig) -> Result<() } } -async fn run<'a>(config: MoveConfig) -> Result<(), String> { +async fn run(config: MoveConfig) -> Result<(), String> { let MoveConfig { src_vc_url, src_vc_token_path, From a244aa3a6971572c65dd1c68c726547a1d38c033 Mon Sep 17 00:00:00 2001 From: Eitan Seri-Levi Date: Fri, 10 Jan 2025 08:13:32 +0700 Subject: [PATCH 8/8] Add libssl install to udeps task (#6777) * Add libssl install to udeps task * Use HTTPS --- .github/workflows/test-suite.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 45f3b757e7..0ee9dbb622 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -392,6 +392,10 @@ jobs: cache: false env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + - name: Fetch libssl1.1 + run: wget https://nz2.archive.ubuntu.com/ubuntu/pool/main/o/openssl/libssl1.1_1.1.1f-1ubuntu2_amd64.deb + - name: Install libssl1.1 + run: sudo dpkg -i libssl1.1_1.1.1f-1ubuntu2_amd64.deb - name: Create Cargo config dir run: mkdir -p .cargo - name: Install custom Cargo config
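NOTE: the `validator_manager` and doppelganger hunks in this series are a second mechanical cleanup: generic parameters that are declared but never used (`async fn run<'a>(...)` loses its `'a`, and one signature also drops an unused `T: 'static + SlotClock`). These are the kinds of declarations flagged by rustc's `unused_lifetimes` lint and Clippy's `extra_unused_lifetimes` and `extra_unused_type_parameters`. A compilable before/after sketch, with a hypothetical stand-in config type:

    struct Config; // stand-in for MoveConfig, ImportConfig, ListConfig, ...

    // Before: `'a` appears nowhere in the signature or body, so it is noise:
    //     async fn run<'a>(config: Config) -> Result<(), String> { ... }

    // After: the identical function with the dead parameter dropped.
    async fn run(_config: Config) -> Result<(), String> {
        Ok(())
    }

    fn main() {
        // `run` is async; the real code awaits it inside a tokio runtime.
        let _future = run(Config);
    }

Dropping the parameter changes nothing at the call sites, which is why these hunks touch only the function signatures.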