From 669932aa671c69013f6133cdda7cb6c19b0832ae Mon Sep 17 00:00:00 2001 From: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Thu, 16 Jan 2025 02:48:50 +0100 Subject: [PATCH] Misc. dependency cleanup (#6810) * remove ensure_dir_exists (2 deps saved) * group UNHANDLED_ERRORs into a generic (2 deps saved) * Introduce separate `health_metrics` crate * separate health_metrics crate * remove metrics from warp_utils * move ProcessHealth::observe and SystemHealth::observe to health_metrics * fix errors * nitpick `Cargo.toml`s --------- Co-authored-by: Daniel Knopik # Conflicts: # Cargo.toml --- Cargo.lock | 21 ++- Cargo.toml | 2 + account_manager/src/validator/create.rs | 11 +- account_manager/src/validator/recover.rs | 8 +- account_manager/src/wallet/mod.rs | 5 +- beacon_node/http_api/Cargo.toml | 1 + .../http_api/src/attestation_performance.rs | 18 +-- beacon_node/http_api/src/attester_duties.rs | 18 ++- beacon_node/http_api/src/block_id.rs | 28 ++-- .../http_api/src/block_packing_efficiency.rs | 12 +- beacon_node/http_api/src/block_rewards.rs | 22 +-- .../http_api/src/build_block_contents.rs | 2 +- beacon_node/http_api/src/lib.rs | 51 ++++--- beacon_node/http_api/src/produce_block.rs | 4 +- beacon_node/http_api/src/proposer_duties.rs | 20 +-- .../http_api/src/standard_block_rewards.rs | 4 +- beacon_node/http_api/src/state_id.rs | 24 ++-- .../http_api/src/sync_committee_rewards.rs | 6 +- beacon_node/http_api/src/sync_committees.rs | 8 +- beacon_node/http_api/src/ui.rs | 6 +- beacon_node/http_metrics/Cargo.toml | 1 + beacon_node/http_metrics/src/metrics.rs | 2 +- common/account_utils/Cargo.toml | 1 - .../src/validator_definitions.rs | 5 +- common/directory/src/lib.rs | 13 +- common/eth2/Cargo.toml | 6 +- common/eth2/src/lighthouse.rs | 122 ----------------- common/health_metrics/Cargo.toml | 12 ++ common/health_metrics/src/lib.rs | 2 + .../src/metrics.rs | 1 + common/health_metrics/src/observe.rs | 127 ++++++++++++++++++ common/monitoring_api/Cargo.toml | 1 + common/monitoring_api/src/gather.rs | 1 + common/monitoring_api/src/lib.rs | 1 + common/validator_dir/Cargo.toml | 1 - common/validator_dir/src/builder.rs | 5 +- common/warp_utils/Cargo.toml | 2 - common/warp_utils/src/lib.rs | 1 - common/warp_utils/src/reject.rs | 38 +----- validator_client/http_api/Cargo.toml | 1 + validator_client/http_api/src/lib.rs | 1 + validator_client/http_metrics/Cargo.toml | 1 + validator_client/http_metrics/src/lib.rs | 2 +- 43 files changed, 303 insertions(+), 315 deletions(-) create mode 100644 common/health_metrics/Cargo.toml create mode 100644 common/health_metrics/src/lib.rs rename common/{warp_utils => health_metrics}/src/metrics.rs (99%) create mode 100644 common/health_metrics/src/observe.rs diff --git a/Cargo.lock b/Cargo.lock index c62e9fbc878..aa9bdd2afc7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -43,7 +43,6 @@ dependencies = [ name = "account_utils" version = "0.1.0" dependencies = [ - "directory", "eth2_keystore", "eth2_wallet", "filesystem", @@ -2572,9 +2571,7 @@ dependencies = [ "lighthouse_network", "mediatype", "pretty_reqwest_error", - "procfs", "proto_array", - "psutil", "reqwest", "reqwest-eventsource", "sensitive_url", @@ -3710,6 +3707,16 @@ dependencies = [ "http 0.2.12", ] +[[package]] +name = "health_metrics" +version = "0.1.0" +dependencies = [ + "eth2", + "metrics", + "procfs", + "psutil", +] + [[package]] name = "heck" version = "0.4.1" @@ -3951,6 +3958,7 @@ dependencies = [ "execution_layer", "futures", "genesis", + "health_metrics", "hex", "lighthouse_network", 
"lighthouse_version", @@ -3986,6 +3994,7 @@ name = "http_metrics" version = "0.1.0" dependencies = [ "beacon_chain", + "health_metrics", "lighthouse_network", "lighthouse_version", "logging", @@ -5716,6 +5725,7 @@ name = "monitoring_api" version = "0.1.0" dependencies = [ "eth2", + "health_metrics", "lighthouse_version", "metrics", "regex", @@ -9561,7 +9571,6 @@ dependencies = [ "bls", "deposit_contract", "derivative", - "directory", "eth2_keystore", "filesystem", "hex", @@ -9589,6 +9598,7 @@ dependencies = [ "filesystem", "futures", "graffiti_file", + "health_metrics", "initialized_validators", "itertools 0.10.5", "lighthouse_version", @@ -9621,6 +9631,7 @@ dependencies = [ name = "validator_http_metrics" version = "0.1.0" dependencies = [ + "health_metrics", "lighthouse_version", "malloc_utils", "metrics", @@ -9799,7 +9810,6 @@ dependencies = [ name = "warp_utils" version = "0.1.0" dependencies = [ - "beacon_chain", "bytes", "eth2", "headers", @@ -9808,7 +9818,6 @@ dependencies = [ "serde", "serde_array_query", "serde_json", - "state_processing", "tokio", "types", "warp", diff --git a/Cargo.toml b/Cargo.toml index 233e5fa775b..e30b6aa2b60 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -33,6 +33,7 @@ members = [ "common/eth2_network_config", "common/eth2_wallet_manager", "common/filesystem", + "common/health_metrics", "common/lighthouse_version", "common/lockfile", "common/logging", @@ -252,6 +253,7 @@ filesystem = { path = "common/filesystem" } fork_choice = { path = "consensus/fork_choice" } genesis = { path = "beacon_node/genesis" } gossipsub = { path = "beacon_node/lighthouse_network/gossipsub/" } +health_metrics = { path = "common/health_metrics" } http_api = { path = "beacon_node/http_api" } initialized_validators = { path = "validator_client/initialized_validators" } int_to_bytes = { path = "consensus/int_to_bytes" } diff --git a/account_manager/src/validator/create.rs b/account_manager/src/validator/create.rs index 73e0ad54d47..3db8c3f152d 100644 --- a/account_manager/src/validator/create.rs +++ b/account_manager/src/validator/create.rs @@ -6,14 +6,13 @@ use account_utils::{ }; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; -use directory::{ - ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR, -}; +use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR, DEFAULT_WALLET_DIR}; use environment::Environment; use eth2_wallet_manager::WalletManager; use slashing_protection::{SlashingDatabase, SLASHING_PROTECTION_FILENAME}; use std::ffi::OsStr; use std::fs; +use std::fs::create_dir_all; use std::path::{Path, PathBuf}; use types::EthSpec; use validator_dir::Builder as ValidatorDirBuilder; @@ -156,8 +155,10 @@ pub fn cli_run( )); } - ensure_dir_exists(&validator_dir)?; - ensure_dir_exists(&secrets_dir)?; + create_dir_all(&validator_dir) + .map_err(|e| format!("Could not create validator dir at {validator_dir:?}: {e:?}"))?; + create_dir_all(&secrets_dir) + .map_err(|e| format!("Could not create secrets dir at {secrets_dir:?}: {e:?}"))?; eprintln!("secrets-dir path {:?}", secrets_dir); eprintln!("wallets-dir path {:?}", wallet_base_dir); diff --git a/account_manager/src/validator/recover.rs b/account_manager/src/validator/recover.rs index ddf754edac9..19d161a468f 100644 --- a/account_manager/src/validator/recover.rs +++ b/account_manager/src/validator/recover.rs @@ -5,10 +5,10 @@ use account_utils::eth2_keystore::{keypair_from_secret, Keystore, KeystoreBuilde use account_utils::{random_password, 
read_mnemonic_from_cli, STDIN_INPUTS_FLAG}; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; -use directory::ensure_dir_exists; use directory::{parse_path_or_default_with_flag, DEFAULT_SECRET_DIR}; use eth2_wallet::bip39::Seed; use eth2_wallet::{recover_validator_secret_from_mnemonic, KeyType, ValidatorKeystores}; +use std::fs::create_dir_all; use std::path::PathBuf; use validator_dir::Builder as ValidatorDirBuilder; pub const CMD: &str = "recover"; @@ -91,8 +91,10 @@ pub fn cli_run(matches: &ArgMatches, validator_dir: PathBuf) -> Result<(), Strin eprintln!("secrets-dir path: {:?}", secrets_dir); - ensure_dir_exists(&validator_dir)?; - ensure_dir_exists(&secrets_dir)?; + create_dir_all(&validator_dir) + .map_err(|e| format!("Could not create validator dir at {validator_dir:?}: {e:?}"))?; + create_dir_all(&secrets_dir) + .map_err(|e| format!("Could not create secrets dir at {secrets_dir:?}: {e:?}"))?; eprintln!(); eprintln!("WARNING: KEY RECOVERY CAN LEAD TO DUPLICATING VALIDATORS KEYS, WHICH CAN LEAD TO SLASHING."); diff --git a/account_manager/src/wallet/mod.rs b/account_manager/src/wallet/mod.rs index 020858db772..c34f0363a48 100644 --- a/account_manager/src/wallet/mod.rs +++ b/account_manager/src/wallet/mod.rs @@ -5,7 +5,8 @@ pub mod recover; use crate::WALLETS_DIR_FLAG; use clap::{Arg, ArgAction, ArgMatches, Command}; use clap_utils::FLAG_HEADER; -use directory::{ensure_dir_exists, parse_path_or_default_with_flag, DEFAULT_WALLET_DIR}; +use directory::{parse_path_or_default_with_flag, DEFAULT_WALLET_DIR}; +use std::fs::create_dir_all; use std::path::PathBuf; pub const CMD: &str = "wallet"; @@ -44,7 +45,7 @@ pub fn cli_run(matches: &ArgMatches) -> Result<(), String> { } else { parse_path_or_default_with_flag(matches, WALLETS_DIR_FLAG, DEFAULT_WALLET_DIR)? }; - ensure_dir_exists(&wallet_base_dir)?; + create_dir_all(&wallet_base_dir).map_err(|_| "Could not create wallet base dir")?; eprintln!("wallet-dir path: {:?}", wallet_base_dir); diff --git a/beacon_node/http_api/Cargo.toml b/beacon_node/http_api/Cargo.toml index 5d601008bc0..0ced27e4464 100644 --- a/beacon_node/http_api/Cargo.toml +++ b/beacon_node/http_api/Cargo.toml @@ -17,6 +17,7 @@ ethereum_serde_utils = { workspace = true } ethereum_ssz = { workspace = true } execution_layer = { workspace = true } futures = { workspace = true } +health_metrics = { workspace = true } hex = { workspace = true } lighthouse_network = { workspace = true } lighthouse_version = { workspace = true } diff --git a/beacon_node/http_api/src/attestation_performance.rs b/beacon_node/http_api/src/attestation_performance.rs index d4f9916814a..2f3f3404456 100644 --- a/beacon_node/http_api/src/attestation_performance.rs +++ b/beacon_node/http_api/src/attestation_performance.rs @@ -7,7 +7,7 @@ use state_processing::{ }; use std::sync::Arc; use types::{BeaconState, BeaconStateError, EthSpec, Hash256}; -use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; +use warp_utils::reject::{custom_bad_request, custom_server_error, unhandled_error}; const MAX_REQUEST_RANGE_EPOCHS: usize = 100; const BLOCK_ROOT_CHUNK_SIZE: usize = 100; @@ -50,7 +50,7 @@ pub fn get_attestation_performance( let end_slot = end_epoch.end_slot(T::EthSpec::slots_per_epoch()); // Ensure end_epoch is smaller than the current epoch - 1. 
- let current_epoch = chain.epoch().map_err(beacon_chain_error)?; + let current_epoch = chain.epoch().map_err(unhandled_error)?; if query.end_epoch >= current_epoch - 1 { return Err(custom_bad_request(format!( "end_epoch must be less than the current epoch - 1. current: {}, end: {}", @@ -83,7 +83,7 @@ pub fn get_attestation_performance( let index_range = if target.to_lowercase() == "global" { chain .with_head(|head| Ok((0..head.beacon_state.validators().len() as u64).collect())) - .map_err(beacon_chain_error)? + .map_err(unhandled_error::)? } else { vec![target.parse::().map_err(|_| { custom_bad_request(format!( @@ -96,10 +96,10 @@ pub fn get_attestation_performance( // Load block roots. let mut block_roots: Vec = chain .forwards_iter_block_roots_until(start_slot, end_slot) - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? .map(|res| res.map(|(root, _)| root)) .collect::, _>>() - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; block_roots.dedup(); // Load first block so we can get its parent. @@ -113,7 +113,7 @@ pub fn get_attestation_performance( .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) }) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; // Load the block of the prior slot which will be used to build the starting state. let prior_block = chain @@ -122,14 +122,14 @@ pub fn get_attestation_performance( maybe_block .ok_or_else(|| BeaconChainError::MissingBeaconBlock(first_block.parent_root())) }) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; // Load state for block replay. let state_root = prior_block.state_root(); let state = chain .get_state(&state_root, Some(prior_slot)) .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; // Allocate an AttestationPerformance vector for each validator in the range. let mut perfs: Vec = @@ -198,7 +198,7 @@ pub fn get_attestation_performance( .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) }) - .map_err(beacon_chain_error) + .map_err(unhandled_error) }) .collect::, _>>()?; diff --git a/beacon_node/http_api/src/attester_duties.rs b/beacon_node/http_api/src/attester_duties.rs index 6c7dc3348c1..8905b24cded 100644 --- a/beacon_node/http_api/src/attester_duties.rs +++ b/beacon_node/http_api/src/attester_duties.rs @@ -16,9 +16,7 @@ pub fn attester_duties( request_indices: &[u64], chain: &BeaconChain, ) -> Result { - let current_epoch = chain - .epoch() - .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = chain.epoch().map_err(warp_utils::reject::unhandled_error)?; // Determine what the current epoch would be if we fast-forward our system clock by // `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. 
@@ -57,7 +55,7 @@ fn cached_attestation_duties( let (duties, dependent_root, execution_status) = chain .validator_attestation_duties(request_indices, request_epoch, head_block_root) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response( duties, @@ -82,7 +80,7 @@ fn compute_historic_attester_duties( let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let head = &cached_head.snapshot; if head.beacon_state.current_epoch() <= request_epoch { @@ -131,13 +129,13 @@ fn compute_historic_attester_duties( state .build_committee_cache(relative_epoch, &chain.spec) .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let dependent_root = state // The only block which decides its own shuffling is the genesis block. .attester_shuffling_decision_root(chain.genesis_block_root, relative_epoch) .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let duties = request_indices .iter() @@ -147,7 +145,7 @@ fn compute_historic_attester_duties( .map_err(BeaconChainError::from) }) .collect::>() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response( duties, @@ -181,7 +179,7 @@ fn ensure_state_knows_attester_duties_for_epoch( // A "partial" state advance is adequate since attester duties don't rely on state roots. partial_state_advance(state, Some(state_root), target_slot, spec) .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; } Ok(()) @@ -208,7 +206,7 @@ fn convert_to_api_response( let usize_indices = indices.iter().map(|i| *i as usize).collect::>(); let index_to_pubkey_map = chain .validator_pubkey_bytes_many(&usize_indices) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let data = duties .into_iter() diff --git a/beacon_node/http_api/src/block_id.rs b/beacon_node/http_api/src/block_id.rs index be70f615e34..cdef1521ec9 100644 --- a/beacon_node/http_api/src/block_id.rs +++ b/beacon_node/http_api/src/block_id.rs @@ -38,7 +38,7 @@ impl BlockId { let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; Ok(( cached_head.head_block_root(), execution_status.is_optimistic_or_invalid(), @@ -63,10 +63,10 @@ impl BlockId { CoreBlockId::Slot(slot) => { let execution_optimistic = chain .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let root = chain .block_root_at_slot(*slot, WhenSlotSkipped::None) - .map_err(warp_utils::reject::beacon_chain_error) + .map_err(warp_utils::reject::unhandled_error) .and_then(|root_opt| { root_opt.ok_or_else(|| { warp_utils::reject::custom_not_found(format!( @@ -96,17 +96,17 @@ impl BlockId { .store .block_exists(root) .map_err(BeaconChainError::DBError) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? 
{ let execution_optimistic = chain .canonical_head .fork_choice_read_lock() .is_optimistic_or_invalid_block(root) .map_err(BeaconChainError::ForkChoiceError) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let blinded_block = chain .get_blinded_block(root) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? .ok_or_else(|| { warp_utils::reject::custom_not_found(format!( "beacon block with root {}", @@ -116,7 +116,7 @@ impl BlockId { let block_slot = blinded_block.slot(); let finalized = chain .is_finalized_block(root, block_slot) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; Ok((*root, execution_optimistic, finalized)) } else { Err(warp_utils::reject::custom_not_found(format!( @@ -134,7 +134,7 @@ impl BlockId { ) -> Result>, warp::Rejection> { chain .get_blinded_block(root) - .map_err(warp_utils::reject::beacon_chain_error) + .map_err(warp_utils::reject::unhandled_error) } /// Return the `SignedBeaconBlock` identified by `self`. @@ -154,7 +154,7 @@ impl BlockId { let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; Ok(( cached_head.snapshot.beacon_block.clone_as_blinded(), execution_status.is_optimistic_or_invalid(), @@ -211,7 +211,7 @@ impl BlockId { let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; Ok(( cached_head.snapshot.beacon_block.clone(), execution_status.is_optimistic_or_invalid(), @@ -223,7 +223,7 @@ impl BlockId { chain .get_block(&root) .await - .map_err(warp_utils::reject::beacon_chain_error) + .map_err(warp_utils::reject::unhandled_error) .and_then(|block_opt| match block_opt { Some(block) => { if block.slot() != *slot { @@ -245,7 +245,7 @@ impl BlockId { chain .get_block(&root) .await - .map_err(warp_utils::reject::beacon_chain_error) + .map_err(warp_utils::reject::unhandled_error) .and_then(|block_opt| { block_opt .map(|block| (Arc::new(block), execution_optimistic, finalized)) @@ -311,7 +311,7 @@ impl BlockId { let blob_sidecar_list = chain .store .get_blobs(&root) - .map_err(|e| warp_utils::reject::beacon_chain_error(e.into()))? + .map_err(|e| warp_utils::reject::unhandled_error(BeaconChainError::from(e)))? 
.blobs() .ok_or_else(|| { warp_utils::reject::custom_not_found(format!("no blobs stored for block {root}")) @@ -356,7 +356,7 @@ impl BlockId { |column_index| match chain.get_data_column(&root, &column_index) { Ok(Some(data_column)) => Some(Ok(data_column)), Ok(None) => None, - Err(e) => Some(Err(warp_utils::reject::beacon_chain_error(e))), + Err(e) => Some(Err(warp_utils::reject::unhandled_error(e))), }, ) .collect::, _>>()?; diff --git a/beacon_node/http_api/src/block_packing_efficiency.rs b/beacon_node/http_api/src/block_packing_efficiency.rs index 66c71872786..431547f10b1 100644 --- a/beacon_node/http_api/src/block_packing_efficiency.rs +++ b/beacon_node/http_api/src/block_packing_efficiency.rs @@ -13,7 +13,7 @@ use types::{ AttestationRef, BeaconCommittee, BeaconState, BeaconStateError, BlindedPayload, ChainSpec, Epoch, EthSpec, Hash256, OwnedBeaconCommittee, RelativeEpoch, SignedBeaconBlock, Slot, }; -use warp_utils::reject::{beacon_chain_error, custom_bad_request, custom_server_error}; +use warp_utils::reject::{custom_bad_request, custom_server_error, unhandled_error}; /// Load blocks from block roots in chunks to reduce load on memory. const BLOCK_ROOT_CHUNK_SIZE: usize = 100; @@ -263,9 +263,9 @@ pub fn get_block_packing_efficiency( // Load block roots. let mut block_roots: Vec = chain .forwards_iter_block_roots_until(start_slot_of_prior_epoch, end_slot) - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? .collect::, _>>() - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? .iter() .map(|(root, _)| *root) .collect(); @@ -280,7 +280,7 @@ pub fn get_block_packing_efficiency( .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*first_block_root)) }) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; // Load state for block replay. let starting_state_root = first_block.state_root(); @@ -290,7 +290,7 @@ pub fn get_block_packing_efficiency( .and_then(|maybe_state| { maybe_state.ok_or(BeaconChainError::MissingBeaconState(starting_state_root)) }) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; // Initialize response vector. let mut response = Vec::new(); @@ -392,7 +392,7 @@ pub fn get_block_packing_efficiency( .and_then(|maybe_block| { maybe_block.ok_or(BeaconChainError::MissingBeaconBlock(*root)) }) - .map_err(beacon_chain_error) + .map_err(unhandled_error) }) .collect::, _>>()?; diff --git a/beacon_node/http_api/src/block_rewards.rs b/beacon_node/http_api/src/block_rewards.rs index ad71e9e9d00..0cc878bb48f 100644 --- a/beacon_node/http_api/src/block_rewards.rs +++ b/beacon_node/http_api/src/block_rewards.rs @@ -7,7 +7,7 @@ use std::num::NonZeroUsize; use std::sync::Arc; use types::beacon_block::BlindedBeaconBlock; use types::non_zero_usize::new_non_zero_usize; -use warp_utils::reject::{beacon_chain_error, beacon_state_error, custom_bad_request}; +use warp_utils::reject::{beacon_state_error, custom_bad_request, unhandled_error}; const STATE_CACHE_SIZE: NonZeroUsize = new_non_zero_usize(2); @@ -30,23 +30,23 @@ pub fn get_block_rewards( let end_block_root = chain .block_root_at_slot(end_slot, WhenSlotSkipped::Prev) - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? 
.ok_or_else(|| custom_bad_request(format!("block at end slot {} unknown", end_slot)))?; let blocks = chain .store .load_blocks_to_replay(start_slot, end_slot, end_block_root) - .map_err(|e| beacon_chain_error(e.into()))?; + .map_err(|e| unhandled_error(BeaconChainError::from(e)))?; let state_root = chain .state_root_at_slot(prior_slot) - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? .ok_or_else(|| custom_bad_request(format!("prior state at slot {} unknown", prior_slot)))?; let mut state = chain .get_state(&state_root, Some(prior_slot)) .and_then(|maybe_state| maybe_state.ok_or(BeaconChainError::MissingBeaconState(state_root))) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; state .build_caches(&chain.spec) @@ -73,12 +73,12 @@ pub fn get_block_rewards( .state_root_iter( chain .forwards_iter_state_roots_until(prior_slot, end_slot) - .map_err(beacon_chain_error)?, + .map_err(unhandled_error)?, ) .no_signature_verification() .minimal_block_root_verification() .apply_blocks(blocks, None) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; if block_replayer.state_root_miss() { warn!( @@ -125,7 +125,7 @@ pub fn compute_block_rewards( ); let parent_block = chain .get_blinded_block(&parent_root) - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? .ok_or_else(|| { custom_bad_request(format!( "parent block not known or not canonical: {:?}", @@ -135,7 +135,7 @@ pub fn compute_block_rewards( let parent_state = chain .get_state(&parent_block.state_root(), Some(parent_block.slot())) - .map_err(beacon_chain_error)? + .map_err(unhandled_error)? .ok_or_else(|| { custom_bad_request(format!( "no state known for parent block: {:?}", @@ -148,7 +148,7 @@ pub fn compute_block_rewards( .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) .minimal_block_root_verification() .apply_blocks(vec![], Some(block.slot())) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error::)?; if block_replayer.state_root_miss() { warn!( @@ -176,7 +176,7 @@ pub fn compute_block_rewards( &mut reward_cache, true, ) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; block_rewards.push(block_reward); } diff --git a/beacon_node/http_api/src/build_block_contents.rs b/beacon_node/http_api/src/build_block_contents.rs index c2ccb6695eb..fb8fba0731d 100644 --- a/beacon_node/http_api/src/build_block_contents.rs +++ b/beacon_node/http_api/src/build_block_contents.rs @@ -23,7 +23,7 @@ pub fn build_block_contents( } = block; let Some((kzg_proofs, blobs)) = blob_items else { - return Err(warp_utils::reject::block_production_error( + return Err(warp_utils::reject::unhandled_error( BlockProductionError::MissingBlobs, )); }; diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index febdf692590..d5c6c115670 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -50,6 +50,7 @@ use eth2::types::{ ValidatorStatus, ValidatorsRequestBody, }; use eth2::{CONSENSUS_VERSION_HEADER, CONTENT_TYPE_HEADER, SSZ_CONTENT_TYPE_HEADER}; +use health_metrics::observe::Observe; use lighthouse_network::{types::SyncState, EnrExt, NetworkGlobals, PeerId, PubsubMessage}; use lighthouse_version::version_with_platform; use logging::SSELoggingComponents; @@ -938,9 +939,9 @@ pub fn serve( ) } } - _ => { - warp_utils::reject::beacon_chain_error(e.into()) - } + _ => warp_utils::reject::unhandled_error( + BeaconChainError::from(e), + ), } })?; @@ -1067,7 +1068,7 @@ pub fn serve( let validators = chain 
.validator_indices(sync_committee.pubkeys.iter()) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let validator_aggregates = validators .chunks_exact(T::EthSpec::sync_subcommittee_size()) @@ -1147,7 +1148,7 @@ pub fn serve( let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; ( cached_head.head_block_root(), cached_head.snapshot.beacon_block.clone_as_blinded(), @@ -1161,13 +1162,13 @@ pub fn serve( BlockId::from_root(parent_root).blinded_block(&chain)?; let (root, _slot) = chain .forwards_iter_block_roots(parent.slot()) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? // Ignore any skip-slots immediately following the parent. .find(|res| { res.as_ref().is_ok_and(|(root, _)| *root != parent_root) }) .transpose() - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? .ok_or_else(|| { warp_utils::reject::custom_not_found(format!( "child of block with root {}", @@ -1248,7 +1249,7 @@ pub fn serve( let canonical = chain .block_root_at_slot(block.slot(), WhenSlotSkipped::None) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? .is_some_and(|canonical| root == canonical); let data = api_types::BlockHeaderData { @@ -2932,7 +2933,7 @@ pub fn serve( let (head, head_execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let head_slot = head.head_slot(); let current_slot = chain.slot_clock.now_or_genesis().ok_or_else(|| { @@ -2992,7 +2993,7 @@ pub fn serve( .blocking_response_task(Priority::P0, move || { let is_optimistic = chain .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let is_syncing = !network_globals.sync_state.read().is_synced(); @@ -3302,9 +3303,7 @@ pub fn serve( task_spawner.blocking_json_task(Priority::P0, move || { not_synced_filter?; - let current_slot = chain - .slot() - .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain.slot().map_err(warp_utils::reject::unhandled_error)?; // allow a tolerance of one slot to account for clock skew if query.slot > current_slot + 1 { @@ -3318,7 +3317,7 @@ pub fn serve( .produce_unaggregated_attestation(query.slot, query.committee_index) .map(|attestation| attestation.data().clone()) .map(api_types::GenericResponse::from) - .map_err(warp_utils::reject::beacon_chain_error) + .map_err(warp_utils::reject::unhandled_error) }) }, ); @@ -3690,11 +3689,9 @@ pub fn serve( .execution_layer .as_ref() .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; - let current_slot = chain - .slot() - .map_err(warp_utils::reject::beacon_chain_error)?; + let current_slot = chain.slot().map_err(warp_utils::reject::unhandled_error)?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); debug!( @@ -3747,12 +3744,12 @@ pub fn serve( .execution_layer .as_ref() .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let current_slot = chain .slot_clock .now_or_genesis() 
.ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let current_epoch = current_slot.epoch(T::EthSpec::slots_per_epoch()); debug!( @@ -3848,12 +3845,12 @@ pub fn serve( .execution_layer .as_ref() .ok_or(BeaconChainError::ExecutionLayerMissing) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? .builder(); let builder = arc_builder .as_ref() .ok_or(BeaconChainError::BuilderMissing) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; builder .post_builder_validators(&filtered_registration_data) .await @@ -3969,9 +3966,8 @@ pub fn serve( chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { // Ensure the request is for either the current, previous or next epoch. - let current_epoch = chain - .epoch() - .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = + chain.epoch().map_err(warp_utils::reject::unhandled_error)?; let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); let next_epoch = current_epoch.saturating_add(Epoch::new(1)); @@ -4010,9 +4006,8 @@ pub fn serve( chain: Arc>| { task_spawner.blocking_json_task(Priority::P0, move || { // Ensure the request is for either the current, previous or next epoch. - let current_epoch = chain - .epoch() - .map_err(warp_utils::reject::beacon_chain_error)?; + let current_epoch = + chain.epoch().map_err(warp_utils::reject::unhandled_error)?; let prev_epoch = current_epoch.saturating_sub(Epoch::new(1)); let next_epoch = current_epoch.saturating_add(Epoch::new(1)); diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index ed30da7362c..0e24e8f1758 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -153,7 +153,7 @@ pub async fn produce_blinded_block_v2( BlockProductionVersion::BlindedV2, ) .await - .map_err(warp_utils::reject::block_production_error)?; + .map_err(warp_utils::reject::unhandled_error)?; build_response_v2(chain, block_response_type, endpoint_version, accept_header) } @@ -184,7 +184,7 @@ pub async fn produce_block_v2( BlockProductionVersion::FullV2, ) .await - .map_err(warp_utils::reject::block_production_error)?; + .map_err(warp_utils::reject::unhandled_error)?; build_response_v2(chain, block_response_type, endpoint_version, accept_header) } diff --git a/beacon_node/http_api/src/proposer_duties.rs b/beacon_node/http_api/src/proposer_duties.rs index 515599ce887..c4945df9d70 100644 --- a/beacon_node/http_api/src/proposer_duties.rs +++ b/beacon_node/http_api/src/proposer_duties.rs @@ -26,7 +26,7 @@ pub fn proposer_duties( .now_or_genesis() .map(|slot| slot.epoch(T::EthSpec::slots_per_epoch())) .ok_or(BeaconChainError::UnableToReadSlot) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; // Determine what the current epoch would be if we fast-forward our system clock by // `MAXIMUM_GOSSIP_CLOCK_DISPARITY`. 
@@ -66,7 +66,7 @@ pub fn proposer_duties( { let (proposers, dependent_root, execution_status, _fork) = compute_proposer_duties_from_head(request_epoch, chain) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response( chain, request_epoch, @@ -114,7 +114,7 @@ fn try_proposer_duties_from_cache( .map_err(warp_utils::reject::beacon_state_error)?; let execution_optimistic = chain .is_optimistic_or_invalid_head_block(head_block) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let dependent_root = match head_epoch.cmp(&request_epoch) { // head_epoch == request_epoch @@ -163,7 +163,7 @@ fn compute_and_cache_proposer_duties( ) -> Result { let (indices, dependent_root, execution_status, fork) = compute_proposer_duties_from_head(current_epoch, chain) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; // Prime the proposer shuffling cache with the newly-learned value. chain @@ -171,7 +171,7 @@ fn compute_and_cache_proposer_duties( .lock() .insert(current_epoch, dependent_root, indices.clone(), fork) .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response( chain, @@ -195,7 +195,7 @@ fn compute_historic_proposer_duties( let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let head = &cached_head.snapshot; if head.beacon_state.current_epoch() <= epoch { @@ -214,7 +214,7 @@ fn compute_historic_proposer_duties( // If we've loaded the head state it might be from a previous epoch, ensure it's in a // suitable epoch. ensure_state_is_in_epoch(&mut state, state_root, epoch, &chain.spec) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; (state, execution_optimistic) } else { let (state, execution_optimistic, _finalized) = @@ -234,14 +234,14 @@ fn compute_historic_proposer_duties( let indices = state .get_beacon_proposer_indices(&chain.spec) .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; // We can supply the genesis block root as the block root since we know that the only block that // decides its own root is the genesis block. let dependent_root = state .proposer_shuffling_decision_root(chain.genesis_block_root) .map_err(BeaconChainError::from) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; convert_to_api_response(chain, epoch, dependent_root, execution_optimistic, indices) } @@ -257,7 +257,7 @@ fn convert_to_api_response( ) -> Result { let index_to_pubkey_map = chain .validator_pubkey_bytes_many(&indices) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; // Map our internal data structure into the API structure. 
let proposer_data = indices diff --git a/beacon_node/http_api/src/standard_block_rewards.rs b/beacon_node/http_api/src/standard_block_rewards.rs index 1ab75374ea8..372a2765da4 100644 --- a/beacon_node/http_api/src/standard_block_rewards.rs +++ b/beacon_node/http_api/src/standard_block_rewards.rs @@ -4,7 +4,7 @@ use crate::ExecutionOptimistic; use beacon_chain::{BeaconChain, BeaconChainTypes}; use eth2::lighthouse::StandardBlockReward; use std::sync::Arc; -use warp_utils::reject::beacon_chain_error; +use warp_utils::reject::unhandled_error; /// The difference between block_rewards and beacon_block_rewards is the later returns block /// reward format that satisfies beacon-api specs pub fn compute_beacon_block_rewards( @@ -19,7 +19,7 @@ pub fn compute_beacon_block_rewards( let rewards = chain .compute_beacon_block_reward(block_ref, &mut state) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; Ok((rewards, execution_optimistic, finalized)) } diff --git a/beacon_node/http_api/src/state_id.rs b/beacon_node/http_api/src/state_id.rs index ddacde9a3fc..353390cdad7 100644 --- a/beacon_node/http_api/src/state_id.rs +++ b/beacon_node/http_api/src/state_id.rs @@ -30,7 +30,7 @@ impl StateId { let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; return Ok(( cached_head.head_state_root(), execution_status.is_optimistic_or_invalid(), @@ -56,7 +56,7 @@ impl StateId { *slot, chain .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?, + .map_err(warp_utils::reject::unhandled_error)?, *slot <= chain .canonical_head @@ -70,11 +70,11 @@ impl StateId { .store .load_hot_state_summary(root) .map_err(BeaconChainError::DBError) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? { let finalization_status = chain .state_finalization_and_canonicity(root, hot_summary.slot) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; let finalized = finalization_status.is_finalized(); let fork_choice = chain.canonical_head.fork_choice_read_lock(); let execution_optimistic = if finalization_status.slot_is_finalized @@ -94,14 +94,14 @@ impl StateId { fork_choice .is_optimistic_or_invalid_block(&hot_summary.latest_block_root) .map_err(BeaconChainError::ForkChoiceError) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? }; return Ok((*root, execution_optimistic, finalized)); } else if let Some(_cold_state_slot) = chain .store .load_cold_state_slot(root) .map_err(BeaconChainError::DBError) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? { let fork_choice = chain.canonical_head.fork_choice_read_lock(); let finalized_root = fork_choice @@ -111,7 +111,7 @@ impl StateId { let execution_optimistic = fork_choice .is_optimistic_or_invalid_block_no_fallback(&finalized_root) .map_err(BeaconChainError::ForkChoiceError) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; return Ok((*root, execution_optimistic, true)); } else { return Err(warp_utils::reject::custom_not_found(format!( @@ -124,7 +124,7 @@ impl StateId { let root = chain .state_root_at_slot(slot) - .map_err(warp_utils::reject::beacon_chain_error)? + .map_err(warp_utils::reject::unhandled_error)? 
.ok_or_else(|| { warp_utils::reject::custom_not_found(format!("beacon state at slot {}", slot)) })?; @@ -178,7 +178,7 @@ impl StateId { let (cached_head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; return Ok(( cached_head.snapshot.beacon_state.clone(), execution_status.is_optimistic_or_invalid(), @@ -191,7 +191,7 @@ impl StateId { let state = chain .get_state(&state_root, slot_opt) - .map_err(warp_utils::reject::beacon_chain_error) + .map_err(warp_utils::reject::unhandled_error) .and_then(|opt| { opt.ok_or_else(|| { warp_utils::reject::custom_not_found(format!( @@ -224,7 +224,7 @@ impl StateId { let (head, execution_status) = chain .canonical_head .head_and_execution_status() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; return func( &head.snapshot.beacon_state, execution_status.is_optimistic_or_invalid(), @@ -273,7 +273,7 @@ pub fn checkpoint_slot_and_execution_optimistic( let execution_optimistic = fork_choice .is_optimistic_or_invalid_block_no_fallback(root) .map_err(BeaconChainError::ForkChoiceError) - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; Ok((slot, execution_optimistic)) } diff --git a/beacon_node/http_api/src/sync_committee_rewards.rs b/beacon_node/http_api/src/sync_committee_rewards.rs index 68a06b1ce8c..ec633724060 100644 --- a/beacon_node/http_api/src/sync_committee_rewards.rs +++ b/beacon_node/http_api/src/sync_committee_rewards.rs @@ -6,7 +6,7 @@ use slog::{debug, Logger}; use state_processing::BlockReplayer; use std::sync::Arc; use types::{BeaconState, SignedBlindedBeaconBlock}; -use warp_utils::reject::{beacon_chain_error, custom_not_found}; +use warp_utils::reject::{custom_not_found, unhandled_error}; pub fn compute_sync_committee_rewards( chain: Arc>, @@ -20,7 +20,7 @@ pub fn compute_sync_committee_rewards( let reward_payload = chain .compute_sync_committee_rewards(block.message(), &mut state) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; let data = if reward_payload.is_empty() { debug!(log, "compute_sync_committee_rewards returned empty"); @@ -71,7 +71,7 @@ pub fn get_state_before_applying_block( .state_root_iter([Ok((parent_block.state_root(), parent_block.slot()))].into_iter()) .minimal_block_root_verification() .apply_blocks(vec![], Some(block.slot())) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error::)?; Ok(replayer.into_state()) } diff --git a/beacon_node/http_api/src/sync_committees.rs b/beacon_node/http_api/src/sync_committees.rs index 3e5b1dc5247..da9f9b7a063 100644 --- a/beacon_node/http_api/src/sync_committees.rs +++ b/beacon_node/http_api/src/sync_committees.rs @@ -39,7 +39,7 @@ pub fn sync_committee_duties( // still dependent on the head. So using `is_optimistic_head` is fine for both cases. let execution_optimistic = chain .is_optimistic_or_invalid_head() - .map_err(warp_utils::reject::beacon_chain_error)?; + .map_err(warp_utils::reject::unhandled_error)?; // Try using the head's sync committees to satisfy the request. This should be sufficient for // the vast majority of requests. Rather than checking if we think the request will succeed in a @@ -55,7 +55,7 @@ pub fn sync_committee_duties( .. 
})) | Err(BeaconChainError::SyncDutiesError(BeaconStateError::IncorrectStateVariant)) => (), - Err(e) => return Err(warp_utils::reject::beacon_chain_error(e)), + Err(e) => return Err(warp_utils::reject::unhandled_error(e)), } let duties = duties_from_state_load(request_epoch, request_indices, altair_fork_epoch, chain) @@ -67,7 +67,7 @@ pub fn sync_committee_duties( "invalid epoch: {}, current epoch: {}", request_epoch, current_epoch )), - e => warp_utils::reject::beacon_chain_error(e), + e => warp_utils::reject::unhandled_error(e), })?; Ok(convert_to_response( verify_unknown_validators(duties, request_epoch, chain)?, @@ -164,7 +164,7 @@ fn verify_unknown_validators( BeaconChainError::SyncDutiesError(BeaconStateError::UnknownValidator(idx)) => { warp_utils::reject::custom_bad_request(format!("invalid validator index: {idx}")) } - e => warp_utils::reject::beacon_chain_error(e), + e => warp_utils::reject::unhandled_error(e), }) } diff --git a/beacon_node/http_api/src/ui.rs b/beacon_node/http_api/src/ui.rs index 616745dbefe..80a9ed896db 100644 --- a/beacon_node/http_api/src/ui.rs +++ b/beacon_node/http_api/src/ui.rs @@ -5,7 +5,7 @@ use eth2::types::{Epoch, ValidatorStatus}; use serde::{Deserialize, Serialize}; use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use warp_utils::reject::beacon_chain_error; +use warp_utils::reject::unhandled_error; #[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)] pub struct ValidatorCountResponse { @@ -58,7 +58,7 @@ pub fn get_validator_count( } Ok::<(), BeaconChainError>(()) }) - .map_err(beacon_chain_error)?; + .map_err(unhandled_error)?; Ok(ValidatorCountResponse { active_ongoing, @@ -101,7 +101,7 @@ pub fn get_validator_info( request_data: ValidatorInfoRequestData, chain: Arc>, ) -> Result { - let current_epoch = chain.epoch().map_err(beacon_chain_error)?; + let current_epoch = chain.epoch().map_err(unhandled_error)?; let epochs = current_epoch.saturating_sub(HISTORIC_EPOCHS).as_u64()..=current_epoch.as_u64(); diff --git a/beacon_node/http_metrics/Cargo.toml b/beacon_node/http_metrics/Cargo.toml index d92f986440c..9ad073439d1 100644 --- a/beacon_node/http_metrics/Cargo.toml +++ b/beacon_node/http_metrics/Cargo.toml @@ -7,6 +7,7 @@ edition = { workspace = true } [dependencies] beacon_chain = { workspace = true } +health_metrics = { workspace = true } lighthouse_network = { workspace = true } lighthouse_version = { workspace = true } malloc_utils = { workspace = true } diff --git a/beacon_node/http_metrics/src/metrics.rs b/beacon_node/http_metrics/src/metrics.rs index d751c51e4c9..bcfb8e4c9cf 100644 --- a/beacon_node/http_metrics/src/metrics.rs +++ b/beacon_node/http_metrics/src/metrics.rs @@ -39,7 +39,7 @@ pub fn gather_prometheus_metrics( lighthouse_network::scrape_discovery_metrics(); - warp_utils::metrics::scrape_health_metrics(); + health_metrics::metrics::scrape_health_metrics(); // It's important to ensure these metrics are explicitly enabled in the case that users aren't // using glibc and this function causes panics. 
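The hunks above are one mechanical substitution repeated across http_api: every dedicated rejection constructor collapses into the single generic helper. A minimal before/after sketch of the call-site pattern, assuming the `unhandled_error` definition introduced in warp_utils further below:

    // Before: one constructor per wrapped error type.
    let epoch = chain.epoch().map_err(warp_utils::reject::beacon_chain_error)?;

    // After: any Debug-printable error boxes into one rejection type.
    let epoch = chain.epoch().map_err(warp_utils::reject::unhandled_error)?;

    // Where the error type cannot be inferred (e.g. out of a closure), call
    // sites pin it with a turbofish, as a few hunks above do; the concrete
    // type is whatever the fallible call produces, e.g.:
    // .map_err(unhandled_error::<BeaconChainError>)?

Because `unhandled_error` is generic, no `use` changes are needed at these call sites; only the function path changes.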
diff --git a/common/account_utils/Cargo.toml b/common/account_utils/Cargo.toml index dece975d37e..3ab60346886 100644 --- a/common/account_utils/Cargo.toml +++ b/common/account_utils/Cargo.toml @@ -6,7 +6,6 @@ edition = { workspace = true } # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -directory = { workspace = true } eth2_keystore = { workspace = true } eth2_wallet = { workspace = true } filesystem = { workspace = true } diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 24f6861daa2..7337d6dfb40 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -4,13 +4,12 @@ //! attempt) to load into the `crate::intialized_validators::InitializedValidators` struct. use crate::{default_keystore_password_path, read_password_string, write_file_via_temporary}; -use directory::ensure_dir_exists; use eth2_keystore::Keystore; use regex::Regex; use serde::{Deserialize, Serialize}; use slog::{error, Logger}; use std::collections::HashSet; -use std::fs::{self, File}; +use std::fs::{self, create_dir_all, File}; use std::io; use std::path::{Path, PathBuf}; use types::{graffiti::GraffitiString, Address, PublicKey}; @@ -229,7 +228,7 @@ impl From> for ValidatorDefinitions { impl ValidatorDefinitions { /// Open an existing file or create a new, empty one if it does not exist. pub fn open_or_create>(validators_dir: P) -> Result { - ensure_dir_exists(validators_dir.as_ref()).map_err(|_| { + create_dir_all(validators_dir.as_ref()).map_err(|_| { Error::UnableToCreateValidatorDir(PathBuf::from(validators_dir.as_ref())) })?; let config_path = validators_dir.as_ref().join(CONFIG_FILENAME); diff --git a/common/directory/src/lib.rs b/common/directory/src/lib.rs index df03b4f9a4e..d042f8dfadc 100644 --- a/common/directory/src/lib.rs +++ b/common/directory/src/lib.rs @@ -1,6 +1,6 @@ use clap::ArgMatches; pub use eth2_network_config::DEFAULT_HARDCODED_NETWORK; -use std::fs::{self, create_dir_all}; +use std::fs; use std::path::{Path, PathBuf}; /// Names for the default directories. @@ -30,17 +30,6 @@ pub fn get_network_dir(matches: &ArgMatches) -> String { } } -/// Checks if a directory exists in the given path and creates a directory if it does not exist. -pub fn ensure_dir_exists>(path: P) -> Result<(), String> { - let path = path.as_ref(); - - if !path.exists() { - create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))?; - } - - Ok(()) -} - /// If `arg` is in `matches`, parses the value as a path. /// /// Otherwise, attempts to find the default directory for the `testnet` from the `matches`. 
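The removal above works because `std::fs::create_dir_all` already provides the whole behaviour of `ensure_dir_exists`: it creates any missing parent directories and returns `Ok(())` when the directory already exists, so callers only need to attach their own error context. A minimal sketch of the replacement pattern, mirroring the deleted helper:

    use std::fs::create_dir_all;
    use std::path::Path;

    /// Equivalent of the removed `ensure_dir_exists` helper.
    fn ensure_dir_exists(path: &Path) -> Result<(), String> {
        // create_dir_all is a no-op for directories that already exist.
        create_dir_all(path).map_err(|e| format!("Unable to create {:?}: {:?}", path, e))
    }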
diff --git a/common/eth2/Cargo.toml b/common/eth2/Cargo.toml index 9d6dea100d4..ca7fa7ccdbe 100644 --- a/common/eth2/Cargo.toml +++ b/common/eth2/Cargo.toml @@ -31,10 +31,6 @@ zeroize = { workspace = true } [dev-dependencies] tokio = { workspace = true } -[target.'cfg(target_os = "linux")'.dependencies] -psutil = { version = "3.3.0", optional = true } -procfs = { version = "0.15.1", optional = true } - [features] default = ["lighthouse"] -lighthouse = ["psutil", "procfs"] +lighthouse = [] diff --git a/common/eth2/src/lighthouse.rs b/common/eth2/src/lighthouse.rs index 66dd5d779bd..badc4857c4c 100644 --- a/common/eth2/src/lighthouse.rs +++ b/common/eth2/src/lighthouse.rs @@ -88,12 +88,6 @@ pub struct ValidatorInclusionData { pub is_previous_epoch_head_attester: bool, } -#[cfg(target_os = "linux")] -use { - psutil::cpu::os::linux::CpuTimesExt, psutil::memory::os::linux::VirtualMemoryExt, - psutil::process::Process, -}; - /// Reports on the health of the Lighthouse instance. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct Health { @@ -164,69 +158,6 @@ pub struct SystemHealth { pub misc_os: String, } -impl SystemHealth { - #[cfg(not(target_os = "linux"))] - pub fn observe() -> Result { - Err("Health is only available on Linux".into()) - } - - #[cfg(target_os = "linux")] - pub fn observe() -> Result { - let vm = psutil::memory::virtual_memory() - .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?; - let loadavg = - psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?; - - let cpu = - psutil::cpu::cpu_times().map_err(|e| format!("Unable to get cpu times: {:?}", e))?; - - let disk_usage = psutil::disk::disk_usage("/") - .map_err(|e| format!("Unable to disk usage info: {:?}", e))?; - - let disk = psutil::disk::DiskIoCountersCollector::default() - .disk_io_counters() - .map_err(|e| format!("Unable to get disk counters: {:?}", e))?; - - let net = psutil::network::NetIoCountersCollector::default() - .net_io_counters() - .map_err(|e| format!("Unable to get network io counters: {:?}", e))?; - - let boot_time = psutil::host::boot_time() - .map_err(|e| format!("Unable to get system boot time: {:?}", e))? - .duration_since(std::time::UNIX_EPOCH) - .map_err(|e| format!("Boot time is lower than unix epoch: {}", e))? 
- .as_secs(); - - Ok(Self { - sys_virt_mem_total: vm.total(), - sys_virt_mem_available: vm.available(), - sys_virt_mem_used: vm.used(), - sys_virt_mem_free: vm.free(), - sys_virt_mem_cached: vm.cached(), - sys_virt_mem_buffers: vm.buffers(), - sys_virt_mem_percent: vm.percent(), - sys_loadavg_1: loadavg.one, - sys_loadavg_5: loadavg.five, - sys_loadavg_15: loadavg.fifteen, - cpu_cores: psutil::cpu::cpu_count_physical(), - cpu_threads: psutil::cpu::cpu_count(), - system_seconds_total: cpu.system().as_secs(), - cpu_time_total: cpu.total().as_secs(), - user_seconds_total: cpu.user().as_secs(), - iowait_seconds_total: cpu.iowait().as_secs(), - idle_seconds_total: cpu.idle().as_secs(), - disk_node_bytes_total: disk_usage.total(), - disk_node_bytes_free: disk_usage.free(), - disk_node_reads_total: disk.read_count(), - disk_node_writes_total: disk.write_count(), - network_node_bytes_total_received: net.bytes_recv(), - network_node_bytes_total_transmit: net.bytes_sent(), - misc_node_boot_ts_seconds: boot_time, - misc_os: std::env::consts::OS.to_string(), - }) - } -} - /// Process specific health #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] pub struct ProcessHealth { @@ -244,59 +175,6 @@ pub struct ProcessHealth { pub pid_process_seconds_total: u64, } -impl ProcessHealth { - #[cfg(not(target_os = "linux"))] - pub fn observe() -> Result { - Err("Health is only available on Linux".into()) - } - - #[cfg(target_os = "linux")] - pub fn observe() -> Result { - let process = - Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; - - let process_mem = process - .memory_info() - .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; - - let me = procfs::process::Process::myself() - .map_err(|e| format!("Unable to get process: {:?}", e))?; - let stat = me - .stat() - .map_err(|e| format!("Unable to get stat: {:?}", e))?; - - let process_times = process - .cpu_times() - .map_err(|e| format!("Unable to get process cpu times : {:?}", e))?; - - Ok(Self { - pid: process.pid(), - pid_num_threads: stat.num_threads, - pid_mem_resident_set_size: process_mem.rss(), - pid_mem_virtual_memory_size: process_mem.vms(), - pid_mem_shared_memory_size: process_mem.shared(), - pid_process_seconds_total: process_times.busy().as_secs() - + process_times.children_system().as_secs() - + process_times.children_system().as_secs(), - }) - } -} - -impl Health { - #[cfg(not(target_os = "linux"))] - pub fn observe() -> Result { - Err("Health is only available on Linux".into()) - } - - #[cfg(target_os = "linux")] - pub fn observe() -> Result { - Ok(Self { - process: ProcessHealth::observe()?, - system: SystemHealth::observe()?, - }) - } -} - /// Indicates how up-to-date the Eth1 caches are. 
 #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
 pub struct Eth1SyncStatusData {
diff --git a/common/health_metrics/Cargo.toml b/common/health_metrics/Cargo.toml
new file mode 100644
index 00000000000..08591471b20
--- /dev/null
+++ b/common/health_metrics/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "health_metrics"
+version = "0.1.0"
+edition = { workspace = true }
+
+[dependencies]
+eth2 = { workspace = true }
+metrics = { workspace = true }
+
+[target.'cfg(target_os = "linux")'.dependencies]
+psutil = "3.3.0"
+procfs = "0.15.1"
diff --git a/common/health_metrics/src/lib.rs b/common/health_metrics/src/lib.rs
new file mode 100644
index 00000000000..bab80fb9128
--- /dev/null
+++ b/common/health_metrics/src/lib.rs
@@ -0,0 +1,2 @@
+pub mod metrics;
+pub mod observe;
diff --git a/common/warp_utils/src/metrics.rs b/common/health_metrics/src/metrics.rs
similarity index 99%
rename from common/warp_utils/src/metrics.rs
rename to common/health_metrics/src/metrics.rs
index fabcf936507..c216426b7d3 100644
--- a/common/warp_utils/src/metrics.rs
+++ b/common/health_metrics/src/metrics.rs
@@ -1,3 +1,4 @@
+use crate::observe::Observe;
 use eth2::lighthouse::{ProcessHealth, SystemHealth};
 use metrics::*;
 use std::sync::LazyLock;
diff --git a/common/health_metrics/src/observe.rs b/common/health_metrics/src/observe.rs
new file mode 100644
index 00000000000..81bb8e6f7e4
--- /dev/null
+++ b/common/health_metrics/src/observe.rs
@@ -0,0 +1,127 @@
+use eth2::lighthouse::{Health, ProcessHealth, SystemHealth};
+
+#[cfg(target_os = "linux")]
+use {
+    psutil::cpu::os::linux::CpuTimesExt, psutil::memory::os::linux::VirtualMemoryExt,
+    psutil::process::Process,
+};
+
+pub trait Observe: Sized {
+    fn observe() -> Result<Self, String>;
+}
+
+impl Observe for Health {
+    #[cfg(not(target_os = "linux"))]
+    fn observe() -> Result<Self, String> {
+        Err("Health is only available on Linux".into())
+    }
+
+    #[cfg(target_os = "linux")]
+    fn observe() -> Result<Self, String> {
+        Ok(Self {
+            process: ProcessHealth::observe()?,
+            system: SystemHealth::observe()?,
+        })
+    }
+}
+
+impl Observe for SystemHealth {
+    #[cfg(not(target_os = "linux"))]
+    fn observe() -> Result<Self, String> {
+        Err("Health is only available on Linux".into())
+    }
+
+    #[cfg(target_os = "linux")]
+    fn observe() -> Result<Self, String> {
+        let vm = psutil::memory::virtual_memory()
+            .map_err(|e| format!("Unable to get virtual memory: {:?}", e))?;
+        let loadavg =
+            psutil::host::loadavg().map_err(|e| format!("Unable to get loadavg: {:?}", e))?;
+
+        let cpu =
+            psutil::cpu::cpu_times().map_err(|e| format!("Unable to get cpu times: {:?}", e))?;
+
+        let disk_usage = psutil::disk::disk_usage("/")
+            .map_err(|e| format!("Unable to disk usage info: {:?}", e))?;
+
+        let disk = psutil::disk::DiskIoCountersCollector::default()
+            .disk_io_counters()
+            .map_err(|e| format!("Unable to get disk counters: {:?}", e))?;
+
+        let net = psutil::network::NetIoCountersCollector::default()
+            .net_io_counters()
+            .map_err(|e| format!("Unable to get network io counters: {:?}", e))?;
+
+        let boot_time = psutil::host::boot_time()
+            .map_err(|e| format!("Unable to get system boot time: {:?}", e))?
+            .duration_since(std::time::UNIX_EPOCH)
+            .map_err(|e| format!("Boot time is lower than unix epoch: {}", e))?
+ .as_secs(); + + Ok(Self { + sys_virt_mem_total: vm.total(), + sys_virt_mem_available: vm.available(), + sys_virt_mem_used: vm.used(), + sys_virt_mem_free: vm.free(), + sys_virt_mem_cached: vm.cached(), + sys_virt_mem_buffers: vm.buffers(), + sys_virt_mem_percent: vm.percent(), + sys_loadavg_1: loadavg.one, + sys_loadavg_5: loadavg.five, + sys_loadavg_15: loadavg.fifteen, + cpu_cores: psutil::cpu::cpu_count_physical(), + cpu_threads: psutil::cpu::cpu_count(), + system_seconds_total: cpu.system().as_secs(), + cpu_time_total: cpu.total().as_secs(), + user_seconds_total: cpu.user().as_secs(), + iowait_seconds_total: cpu.iowait().as_secs(), + idle_seconds_total: cpu.idle().as_secs(), + disk_node_bytes_total: disk_usage.total(), + disk_node_bytes_free: disk_usage.free(), + disk_node_reads_total: disk.read_count(), + disk_node_writes_total: disk.write_count(), + network_node_bytes_total_received: net.bytes_recv(), + network_node_bytes_total_transmit: net.bytes_sent(), + misc_node_boot_ts_seconds: boot_time, + misc_os: std::env::consts::OS.to_string(), + }) + } +} + +impl Observe for ProcessHealth { + #[cfg(not(target_os = "linux"))] + fn observe() -> Result { + Err("Health is only available on Linux".into()) + } + + #[cfg(target_os = "linux")] + fn observe() -> Result { + let process = + Process::current().map_err(|e| format!("Unable to get current process: {:?}", e))?; + + let process_mem = process + .memory_info() + .map_err(|e| format!("Unable to get process memory info: {:?}", e))?; + + let me = procfs::process::Process::myself() + .map_err(|e| format!("Unable to get process: {:?}", e))?; + let stat = me + .stat() + .map_err(|e| format!("Unable to get stat: {:?}", e))?; + + let process_times = process + .cpu_times() + .map_err(|e| format!("Unable to get process cpu times : {:?}", e))?; + + Ok(Self { + pid: process.pid(), + pid_num_threads: stat.num_threads, + pid_mem_resident_set_size: process_mem.rss(), + pid_mem_virtual_memory_size: process_mem.vms(), + pid_mem_shared_memory_size: process_mem.shared(), + pid_process_seconds_total: process_times.busy().as_secs() + + process_times.children_system().as_secs() + + process_times.children_system().as_secs(), + }) + } +} diff --git a/common/monitoring_api/Cargo.toml b/common/monitoring_api/Cargo.toml index 5008c86e858..cb52cff29a5 100644 --- a/common/monitoring_api/Cargo.toml +++ b/common/monitoring_api/Cargo.toml @@ -7,6 +7,7 @@ edition = { workspace = true } [dependencies] eth2 = { workspace = true } +health_metrics = { workspace = true } lighthouse_version = { workspace = true } metrics = { workspace = true } regex = { workspace = true } diff --git a/common/monitoring_api/src/gather.rs b/common/monitoring_api/src/gather.rs index 2f6c820f562..43bea35a933 100644 --- a/common/monitoring_api/src/gather.rs +++ b/common/monitoring_api/src/gather.rs @@ -1,4 +1,5 @@ use super::types::{BeaconProcessMetrics, ValidatorProcessMetrics}; +use health_metrics::observe::Observe; use metrics::{MetricFamily, MetricType}; use serde_json::json; use std::collections::HashMap; diff --git a/common/monitoring_api/src/lib.rs b/common/monitoring_api/src/lib.rs index 9592c50a404..6f919971b02 100644 --- a/common/monitoring_api/src/lib.rs +++ b/common/monitoring_api/src/lib.rs @@ -4,6 +4,7 @@ use std::{path::PathBuf, time::Duration}; use eth2::lighthouse::SystemHealth; use gather::{gather_beacon_metrics, gather_validator_metrics}; +use health_metrics::observe::Observe; use reqwest::{IntoUrl, Response}; pub use reqwest::{StatusCode, Url}; use sensitive_url::SensitiveUrl; 
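With the inherent `observe` methods converted to a trait, the consumers touched above (monitoring_api here, the HTTP APIs and metrics servers elsewhere in this patch) only add `use health_metrics::observe::Observe;` and keep the same call shape. A minimal usage sketch, assuming a Linux target (the non-Linux impls return `Err`):

    use eth2::lighthouse::{Health, ProcessHealth};
    use health_metrics::observe::Observe; // brings `observe()` into scope

    fn snapshot() -> Result<(), String> {
        let process = ProcessHealth::observe()?; // same call as the old inherent method
        let health = Health::observe()?;
        println!("pid={} threads={}", process.pid, process.pid_num_threads);
        Ok(())
    }

Moving the impls behind a trait is what lets the `psutil`/`procfs` dependencies drop out of the `eth2` crate: the health types stay there, while the platform-specific collection code lives only in `health_metrics`.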
diff --git a/common/validator_dir/Cargo.toml b/common/validator_dir/Cargo.toml
index 773431c93c6..4c03b7662ed 100644
--- a/common/validator_dir/Cargo.toml
+++ b/common/validator_dir/Cargo.toml
@@ -12,7 +12,6 @@ insecure_keys = []
 bls = { workspace = true }
 deposit_contract = { workspace = true }
 derivative = { workspace = true }
-directory = { workspace = true }
 eth2_keystore = { workspace = true }
 filesystem = { workspace = true }
 hex = { workspace = true }
diff --git a/common/validator_dir/src/builder.rs b/common/validator_dir/src/builder.rs
index 3d5d1496082..2e971a8b1ae 100644
--- a/common/validator_dir/src/builder.rs
+++ b/common/validator_dir/src/builder.rs
@@ -1,7 +1,6 @@
 use crate::{Error as DirError, ValidatorDir};
 use bls::get_withdrawal_credentials;
 use deposit_contract::{encode_eth1_tx_data, Error as DepositError};
-use directory::ensure_dir_exists;
 use eth2_keystore::{Error as KeystoreError, Keystore, KeystoreBuilder, PlainText};
 use filesystem::create_with_600_perms;
 use rand::{distributions::Alphanumeric, Rng};
@@ -42,7 +41,7 @@ pub enum Error {
     #[cfg(feature = "insecure_keys")]
     InsecureKeysError(String),
     MissingPasswordDir,
-    UnableToCreatePasswordDir(String),
+    UnableToCreatePasswordDir(io::Error),
 }
 
 impl From<KeystoreError> for Error {
@@ -163,7 +162,7 @@ impl<'a> Builder<'a> {
         }
 
         if let Some(password_dir) = &self.password_dir {
-            ensure_dir_exists(password_dir).map_err(Error::UnableToCreatePasswordDir)?;
+            create_dir_all(password_dir).map_err(Error::UnableToCreatePasswordDir)?;
         }
 
         // The withdrawal keystore must be initialized in order to store it or create an eth1
diff --git a/common/warp_utils/Cargo.toml b/common/warp_utils/Cargo.toml
index 4a3cde54a9a..ec2d23686b1 100644
--- a/common/warp_utils/Cargo.toml
+++ b/common/warp_utils/Cargo.toml
@@ -6,7 +6,6 @@ edition = { workspace = true }
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-beacon_chain = { workspace = true }
 bytes = { workspace = true }
 eth2 = { workspace = true }
 headers = "0.3.2"
@@ -15,7 +14,6 @@ safe_arith = { workspace = true }
 serde = { workspace = true }
 serde_array_query = "0.1.0"
 serde_json = { workspace = true }
-state_processing = { workspace = true }
 tokio = { workspace = true }
 types = { workspace = true }
 warp = { workspace = true }
diff --git a/common/warp_utils/src/lib.rs b/common/warp_utils/src/lib.rs
index 55ee423fa41..c10adbac0df 100644
--- a/common/warp_utils/src/lib.rs
+++ b/common/warp_utils/src/lib.rs
@@ -3,7 +3,6 @@
 pub mod cors;
 pub mod json;
-pub mod metrics;
 pub mod query;
 pub mod reject;
 pub mod task;
 
diff --git a/common/warp_utils/src/reject.rs b/common/warp_utils/src/reject.rs
index bbd5274a7eb..3c7ef5e4fa7 100644
--- a/common/warp_utils/src/reject.rs
+++ b/common/warp_utils/src/reject.rs
@@ -2,6 +2,7 @@ use eth2::types::{ErrorMessage, Failure, IndexedErrorMessage};
 use std::convert::Infallible;
 use std::error::Error;
 use std::fmt;
+use std::fmt::Debug;
 use warp::{http::StatusCode, reject::Reject, reply::Response, Reply};
 
 #[derive(Debug)]
@@ -19,15 +20,6 @@ pub fn server_sent_event_error(s: String) -> ServerSentEventError {
     ServerSentEventError(s)
 }
 
-#[derive(Debug)]
-pub struct BeaconChainError(pub beacon_chain::BeaconChainError);
-
-impl Reject for BeaconChainError {}
-
-pub fn beacon_chain_error(e: beacon_chain::BeaconChainError) -> warp::reject::Rejection {
-    warp::reject::custom(BeaconChainError(e))
-}
-
 #[derive(Debug)]
 pub struct BeaconStateError(pub types::BeaconStateError);
 
@@ -47,21 +39,12 @@ pub fn arith_error(e: safe_arith::ArithError) -> warp::reject::Rejection {
 }
 
 #[derive(Debug)]
-pub struct SlotProcessingError(pub state_processing::SlotProcessingError);
-
-impl Reject for SlotProcessingError {}
-
-pub fn slot_processing_error(e: state_processing::SlotProcessingError) -> warp::reject::Rejection {
-    warp::reject::custom(SlotProcessingError(e))
-}
-
-#[derive(Debug)]
-pub struct BlockProductionError(pub beacon_chain::BlockProductionError);
+pub struct UnhandledError(pub Box<dyn Debug + Send + Sync>);
 
-impl Reject for BlockProductionError {}
+impl Reject for UnhandledError {}
 
-pub fn block_production_error(e: beacon_chain::BlockProductionError) -> warp::reject::Rejection {
-    warp::reject::custom(BlockProductionError(e))
+pub fn unhandled_error<D: Debug + Send + Sync + 'static>(e: D) -> warp::reject::Rejection {
+    warp::reject::custom(UnhandledError(Box::new(e)))
 }
 
 #[derive(Debug)]
@@ -191,16 +174,7 @@ pub async fn handle_rejection(err: warp::Rejection) -> Result<Response, Infallible> {
     } else if let Some(e) = err.find::<warp::reject::InvalidQuery>() {
         code = StatusCode::BAD_REQUEST;
         message = format!("BAD_REQUEST: invalid query: {}", e);
-    } else if let Some(e) = err.find::<crate::reject::BeaconChainError>() {
-        code = StatusCode::INTERNAL_SERVER_ERROR;
-        message = format!("UNHANDLED_ERROR: {:?}", e.0);
-    } else if let Some(e) = err.find::<crate::reject::BeaconStateError>() {
-        code = StatusCode::INTERNAL_SERVER_ERROR;
-        message = format!("UNHANDLED_ERROR: {:?}", e.0);
-    } else if let Some(e) = err.find::<crate::reject::SlotProcessingError>() {
-        code = StatusCode::INTERNAL_SERVER_ERROR;
-        message = format!("UNHANDLED_ERROR: {:?}", e.0);
-    } else if let Some(e) = err.find::<crate::reject::BlockProductionError>() {
+    } else if let Some(e) = err.find::<crate::reject::UnhandledError>() {
         code = StatusCode::INTERNAL_SERVER_ERROR;
         message = format!("UNHANDLED_ERROR: {:?}", e.0);
     } else if let Some(e) = err.find::<crate::reject::ArithError>() {
diff --git a/validator_client/http_api/Cargo.toml b/validator_client/http_api/Cargo.toml
index 76a021ab8c3..651e658a7a5 100644
--- a/validator_client/http_api/Cargo.toml
+++ b/validator_client/http_api/Cargo.toml
@@ -21,6 +21,7 @@ eth2_keystore = { workspace = true }
 ethereum_serde_utils = { workspace = true }
 filesystem = { workspace = true }
 graffiti_file = { workspace = true }
+health_metrics = { workspace = true }
 initialized_validators = { workspace = true }
 lighthouse_version = { workspace = true }
 logging = { workspace = true }
diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs
index 73ebe717af3..9c3e3da63d1 100644
--- a/validator_client/http_api/src/lib.rs
+++ b/validator_client/http_api/src/lib.rs
@@ -32,6 +32,7 @@ use eth2::lighthouse_vc::{
         PublicKeyBytes, SetGraffitiRequest,
     },
 };
+use health_metrics::observe::Observe;
 use lighthouse_version::version_with_platform;
 use logging::SSELoggingComponents;
 use parking_lot::RwLock;
diff --git a/validator_client/http_metrics/Cargo.toml b/validator_client/http_metrics/Cargo.toml
index c29a4d18fa0..a3432410bc7 100644
--- a/validator_client/http_metrics/Cargo.toml
+++ b/validator_client/http_metrics/Cargo.toml
@@ -5,6 +5,7 @@ edition = { workspace = true }
 authors = ["Sigma Prime <sigp@sigmaprime.io>"]
 
 [dependencies]
+health_metrics = { workspace = true }
 lighthouse_version = { workspace = true }
 malloc_utils = { workspace = true }
 metrics = { workspace = true }
diff --git a/validator_client/http_metrics/src/lib.rs b/validator_client/http_metrics/src/lib.rs
index 984b752e5a5..f1c6d4ed8ad 100644
--- a/validator_client/http_metrics/src/lib.rs
+++ b/validator_client/http_metrics/src/lib.rs
@@ -206,7 +206,7 @@ pub fn gather_prometheus_metrics(
         scrape_allocator_metrics();
     }
 
-    warp_utils::metrics::scrape_health_metrics();
+    health_metrics::metrics::scrape_health_metrics();
 
     encoder
         .encode(&metrics::gather(), &mut buffer)
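
Note for reviewers: with the per-type rejections gone, anything that is `Debug + Send + Sync + 'static` can be boxed into the single `UnhandledError` rejection, which is what lets `warp_utils` drop its `beacon_chain` and `state_processing` dependencies. A minimal sketch of the new call-site pattern, where the error type is a hypothetical stand-in and not from this patch:

// Sketch only: `SomeInternalError` stands in for any error type a handler
// previously converted with a dedicated helper such as beacon_chain_error.
#[derive(Debug)]
struct SomeInternalError(&'static str);

fn reject_internal_failure() -> warp::reject::Rejection {
    // handle_rejection will surface this as a 500 with "UNHANDLED_ERROR: ...".
    warp_utils::reject::unhandled_error(SomeInternalError("state lookup failed"))
}

The trade-off is that the boxed error is only rendered via `{:?}`, so any caller that wants a status code other than 500 must keep a dedicated rejection type, as `BeaconStateError` and `ArithError` still do.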