Merge remote-tracking branch 'origin/unstable' into decouple-subnets
jimmygchen committed Jan 14, 2025
2 parents b3ecd95 + 93f9c2c commit 357e3e9
Showing 38 changed files with 439 additions and 257 deletions.
7 changes: 6 additions & 1 deletion Cargo.toml
@@ -9,11 +9,13 @@ members = [
"beacon_node/client",
"beacon_node/eth1",
"beacon_node/execution_layer",
"beacon_node/genesis",
"beacon_node/http_api",
"beacon_node/http_metrics",
"beacon_node/lighthouse_network",
"beacon_node/lighthouse_network/gossipsub",
"beacon_node/network",
"beacon_node/operation_pool",
"beacon_node/store",
"beacon_node/timer",

@@ -30,6 +32,7 @@ members = [
"common/eth2_interop_keypairs",
"common/eth2_network_config",
"common/eth2_wallet_manager",
"common/filesystem",
"common/lighthouse_version",
"common/lockfile",
"common/logging",
@@ -48,14 +51,16 @@ members = [
"common/unused_port",
"common/validator_dir",
"common/warp_utils",

"consensus/fixed_bytes",
"consensus/fork_choice",

"consensus/int_to_bytes",
"consensus/merkle_proof",
"consensus/proto_array",
"consensus/safe_arith",
"consensus/state_processing",
"consensus/swap_or_not_shuffle",
"consensus/types",

"crypto/bls",
"crypto/eth2_key_derivation",
82 changes: 33 additions & 49 deletions beacon_node/beacon_chain/tests/validator_monitor.rs
@@ -4,7 +4,7 @@ use beacon_chain::test_utils::{
use beacon_chain::validator_monitor::{ValidatorMonitorConfig, MISSED_BLOCK_LAG_SLOTS};
use logging::test_logger;
use std::sync::LazyLock;
use types::{Epoch, EthSpec, ForkName, Keypair, MainnetEthSpec, PublicKeyBytes, Slot};
use types::{Epoch, EthSpec, Keypair, MainnetEthSpec, PublicKeyBytes, Slot};

// Should ideally be divisible by 3.
pub const VALIDATOR_COUNT: usize = 48;
@@ -117,7 +117,7 @@ async fn missed_blocks_across_epochs() {
}

#[tokio::test]
async fn produces_missed_blocks() {
async fn missed_blocks_basic() {
let validator_count = 16;

let slots_per_epoch = E::slots_per_epoch();
@@ -127,13 +127,10 @@ async fn produces_missed_blocks() {
// Generate 63 slots (2 epochs * 32 slots per epoch - 1)
let initial_blocks = slots_per_epoch * nb_epoch_to_simulate.as_u64() - 1;

// The validator index of the validator that is 'supposed' to miss a block
let validator_index_to_monitor = 1;

// 1st scenario //
//
// Missed block happens when slot and prev_slot are in the same epoch
let harness1 = get_harness(validator_count, vec![validator_index_to_monitor]);
let harness1 = get_harness(validator_count, vec![]);
harness1
.extend_chain(
initial_blocks as usize,
@@ -153,7 +150,7 @@ async fn produces_missed_blocks() {
let mut prev_slot = Slot::new(idx - 1);
let mut duplicate_block_root = *_state.block_roots().get(idx as usize).unwrap();
let mut validator_indexes = _state.get_beacon_proposer_indices(&harness1.spec).unwrap();
let mut validator_index = validator_indexes[slot_in_epoch.as_usize()];
let mut missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()];
let mut proposer_shuffling_decision_root = _state
.proposer_shuffling_decision_root(duplicate_block_root)
.unwrap();
@@ -170,7 +167,7 @@ async fn produces_missed_blocks() {
beacon_proposer_cache.lock().insert(
epoch,
proposer_shuffling_decision_root,
validator_indexes.into_iter().collect::<Vec<usize>>(),
validator_indexes,
_state.fork()
),
Ok(())
@@ -187,12 +184,15 @@ async fn produces_missed_blocks() {
// Let's validate the state which will call the function responsible for
// adding the missed blocks to the validator monitor
let mut validator_monitor = harness1.chain.validator_monitor.write();

validator_monitor.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress());
validator_monitor.process_valid_state(nb_epoch_to_simulate, _state, &harness1.chain.spec);

// We should have one entry in the missed blocks map
assert_eq!(
validator_monitor.get_monitored_validator_missed_block_count(validator_index as u64),
1
validator_monitor
.get_monitored_validator_missed_block_count(missed_block_proposer as u64),
1,
);
}

@@ -201,23 +201,7 @@ async fn produces_missed_blocks() {
// Missed block happens when slot and prev_slot are not in the same epoch
// making sure that the cache reloads when the epoch changes
// in that scenario the slot that missed a block is the first slot of the epoch
// We are adding other validators to monitor as these ones will miss a block depending on
// the fork name specified when running the test as the proposer cache differs depending on
// the fork name (cf. seed)
//
// If you are adding a new fork and seeing errors, print
// `validator_indexes[slot_in_epoch.as_usize()]` and add it below.
let validator_index_to_monitor = match harness1.spec.fork_name_at_slot::<E>(Slot::new(0)) {
ForkName::Base => 7,
ForkName::Altair => 2,
ForkName::Bellatrix => 4,
ForkName::Capella => 11,
ForkName::Deneb => 3,
ForkName::Electra => 1,
ForkName::Fulu => 6,
};

let harness2 = get_harness(validator_count, vec![validator_index_to_monitor]);
let harness2 = get_harness(validator_count, vec![]);
let advance_slot_by = 9;
harness2
.extend_chain(
@@ -238,11 +222,7 @@ async fn produces_missed_blocks() {
slot_in_epoch = slot % slots_per_epoch;
duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap();
validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap();
validator_index = validator_indexes[slot_in_epoch.as_usize()];
// If you are adding a new fork and seeing errors, it means the fork seed has changed the
// validator_index. Uncomment this line, run the test again and add the resulting index to the
// list above.
//eprintln!("new index which needs to be added => {:?}", validator_index);
missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()];

let beacon_proposer_cache = harness2
.chain
@@ -256,7 +236,7 @@ async fn produces_missed_blocks() {
beacon_proposer_cache.lock().insert(
epoch,
duplicate_block_root,
validator_indexes.into_iter().collect::<Vec<usize>>(),
validator_indexes.clone(),
_state2.fork()
),
Ok(())
@@ -271,30 +251,33 @@ async fn produces_missed_blocks() {
// Let's validate the state which will call the function responsible for
// adding the missed blocks to the validator monitor
let mut validator_monitor2 = harness2.chain.validator_monitor.write();
validator_monitor2.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress());
validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec);
// We should have one entry in the missed blocks map
assert_eq!(
validator_monitor2.get_monitored_validator_missed_block_count(validator_index as u64),
validator_monitor2
.get_monitored_validator_missed_block_count(missed_block_proposer as u64),
1
);

// 3rd scenario //
//
// A missed block happens but the validator is not monitored
// it should not be flagged as a missed block
idx = initial_blocks + (advance_slot_by) - 7;
while validator_indexes[(idx % slots_per_epoch) as usize] == missed_block_proposer
&& idx / slots_per_epoch == epoch.as_u64()
{
idx += 1;
}
slot = Slot::new(idx);
prev_slot = Slot::new(idx - 1);
slot_in_epoch = slot % slots_per_epoch;
duplicate_block_root = *_state2.block_roots().get(idx as usize).unwrap();
validator_indexes = _state2.get_beacon_proposer_indices(&harness2.spec).unwrap();
let not_monitored_validator_index = validator_indexes[slot_in_epoch.as_usize()];
// This could do with a refactor: https://github.com/sigp/lighthouse/issues/6293
assert_ne!(
not_monitored_validator_index,
validator_index_to_monitor,
"this test has a fragile dependency on hardcoded indices. you need to tweak some settings or rewrite this"
);
let second_missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()];

// This test may fail if we can't find another distinct proposer in the same epoch.
// However, this should be vanishingly unlikely: P ~= (1/16)^32 = 2e-39.
assert_ne!(missed_block_proposer, second_missed_block_proposer);

assert_eq!(
_state2.set_block_root(prev_slot, duplicate_block_root),
@@ -306,10 +289,9 @@ async fn produces_missed_blocks() {
validator_monitor2.process_valid_state(epoch, _state2, &harness2.chain.spec);

// We shouldn't have any entry in the missed blocks map
assert_ne!(validator_index, not_monitored_validator_index);
assert_eq!(
validator_monitor2
.get_monitored_validator_missed_block_count(not_monitored_validator_index as u64),
.get_monitored_validator_missed_block_count(second_missed_block_proposer as u64),
0
);
}
@@ -318,7 +300,7 @@ async fn produces_missed_blocks() {
//
// A missed block happens at state.slot - LOG_SLOTS_PER_EPOCH
// it shouldn't be flagged as a missed block
let harness3 = get_harness(validator_count, vec![validator_index_to_monitor]);
let harness3 = get_harness(validator_count, vec![]);
harness3
.extend_chain(
slots_per_epoch as usize,
@@ -338,7 +320,7 @@ async fn produces_missed_blocks() {
prev_slot = Slot::new(idx - 1);
duplicate_block_root = *_state3.block_roots().get(idx as usize).unwrap();
validator_indexes = _state3.get_beacon_proposer_indices(&harness3.spec).unwrap();
validator_index = validator_indexes[slot_in_epoch.as_usize()];
missed_block_proposer = validator_indexes[slot_in_epoch.as_usize()];
proposer_shuffling_decision_root = _state3
.proposer_shuffling_decision_root_at_epoch(epoch, duplicate_block_root)
.unwrap();
@@ -355,7 +337,7 @@ async fn produces_missed_blocks() {
beacon_proposer_cache.lock().insert(
epoch,
proposer_shuffling_decision_root,
validator_indexes.into_iter().collect::<Vec<usize>>(),
validator_indexes,
_state3.fork()
),
Ok(())
@@ -372,11 +354,13 @@ async fn produces_missed_blocks() {
// Let's validate the state which will call the function responsible for
// adding the missed blocks to the validator monitor
let mut validator_monitor3 = harness3.chain.validator_monitor.write();
validator_monitor3.add_validator_pubkey(KEYPAIRS[missed_block_proposer].pk.compress());
validator_monitor3.process_valid_state(epoch, _state3, &harness3.chain.spec);

// We shouldn't have one entry in the missed blocks map
assert_eq!(
validator_monitor3.get_monitored_validator_missed_block_count(validator_index as u64),
validator_monitor3
.get_monitored_validator_missed_block_count(missed_block_proposer as u64),
0
);
}
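The substance of this test change: `produces_missed_blocks` (renamed to `missed_blocks_basic`) no longer hardcodes a fork-dependent `validator_index_to_monitor`. It reads the proposer of the missed slot out of the state's proposer shuffling and registers that validator with the monitor via `add_validator_pubkey` just before processing the state. A minimal self-contained sketch of the pattern, with a stand-in schedule in place of `BeaconState::get_beacon_proposer_indices` (the `proposer_schedule` helper and its hash constant are illustrative, not Lighthouse code):

// Stand-in for the proposer shuffling: one proposer index per slot of an
// epoch. The real test reads this from the beacon state.
fn proposer_schedule(seed: u64, slots_per_epoch: u64, validator_count: u64) -> Vec<usize> {
    (0..slots_per_epoch)
        .map(|slot| ((seed ^ slot).wrapping_mul(0x9e37_79b9_7f4a_7c15) % validator_count) as usize)
        .collect()
}

fn main() {
    let slots_per_epoch = 32;
    let validator_count = 16;
    let schedule = proposer_schedule(42, slots_per_epoch, validator_count);

    // New approach: derive the proposer of the missed slot from the
    // schedule itself, instead of a hardcoded per-fork index.
    let missed_slot: u64 = 13;
    let missed_block_proposer = schedule[(missed_slot % slots_per_epoch) as usize];

    // Third scenario: find a *different* proposer in the same epoch, as the
    // new `while` loop in the test does. With 16 validators the chance that
    // no other slot has a distinct proposer is ~(1/16)^31, i.e. negligible.
    let mut idx = missed_slot + 1;
    while schedule[(idx % slots_per_epoch) as usize] == missed_block_proposer
        && idx < missed_slot + slots_per_epoch
    {
        idx += 1;
    }
    let second_proposer = schedule[(idx % slots_per_epoch) as usize];
    assert_ne!(missed_block_proposer, second_proposer);
    println!("monitor validator {missed_block_proposer}, ignore {second_proposer}");
}

Deriving the proposer dynamically is what lets the diff delete the per-fork index table, which had to be extended by hand every time a new fork changed the shuffling seed.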
87 changes: 62 additions & 25 deletions beacon_node/execution_layer/src/engine_api/json_structures.rs
@@ -7,7 +7,7 @@ use superstruct::superstruct;
use types::beacon_block_body::KzgCommitments;
use types::blob_sidecar::BlobsList;
use types::execution_requests::{
ConsolidationRequests, DepositRequests, RequestPrefix, WithdrawalRequests,
ConsolidationRequests, DepositRequests, RequestType, WithdrawalRequests,
};
use types::{Blob, FixedVector, KzgProof, Unsigned};

@@ -401,47 +401,80 @@ impl<E: EthSpec> From<JsonExecutionPayload<E>> for ExecutionPayload<E> {
}
}

#[derive(Debug, Clone)]
pub enum RequestsError {
InvalidHex(hex::FromHexError),
EmptyRequest(usize),
InvalidOrdering,
InvalidPrefix(u8),
DecodeError(String),
}

/// Format of `ExecutionRequests` received over the engine api.
///
/// Array of ssz-encoded requests list encoded as hex bytes.
/// The prefix of the request type is used to index into the array.
///
/// For e.g. [0xab, 0xcd, 0xef]
/// Here, 0xab are the deposits bytes (prefix and index == 0)
/// 0xcd are the withdrawals bytes (prefix and index == 1)
/// 0xef are the consolidations bytes (prefix and index == 2)
/// Array of ssz-encoded request lists, each encoded as hex bytes and
/// prefixed with a `RequestType` byte.
#[derive(Debug, Default, Clone, PartialEq, Serialize, Deserialize)]
#[serde(transparent)]
pub struct JsonExecutionRequests(pub Vec<String>);

impl<E: EthSpec> TryFrom<JsonExecutionRequests> for ExecutionRequests<E> {
type Error = String;
type Error = RequestsError;

fn try_from(value: JsonExecutionRequests) -> Result<Self, Self::Error> {
let mut requests = ExecutionRequests::default();

let mut prev_prefix: Option<RequestType> = None;
for (i, request) in value.0.into_iter().enumerate() {
// hex string
let decoded_bytes = hex::decode(request.strip_prefix("0x").unwrap_or(&request))
.map_err(|e| format!("Invalid hex {:?}", e))?;
match RequestPrefix::from_prefix(i as u8) {
Some(RequestPrefix::Deposit) => {
requests.deposits = DepositRequests::<E>::from_ssz_bytes(&decoded_bytes)
.map_err(|e| format!("Failed to decode DepositRequest from EL: {:?}", e))?;
.map_err(RequestsError::InvalidHex)?;

// The first byte of each element is the `request_type` and the remaining bytes are the `request_data`.
// Elements with empty `request_data` **MUST** be excluded from the list.
let Some((prefix_byte, request_bytes)) = decoded_bytes.split_first() else {
return Err(RequestsError::EmptyRequest(i));
};
if request_bytes.is_empty() {
return Err(RequestsError::EmptyRequest(i));
}
// Elements of the list **MUST** be ordered by `request_type` in ascending order
let current_prefix = RequestType::from_u8(*prefix_byte)
.ok_or(RequestsError::InvalidPrefix(*prefix_byte))?;
if let Some(prev) = prev_prefix {
if prev.to_u8() >= current_prefix.to_u8() {
return Err(RequestsError::InvalidOrdering);
}
Some(RequestPrefix::Withdrawal) => {
requests.withdrawals = WithdrawalRequests::<E>::from_ssz_bytes(&decoded_bytes)
}
prev_prefix = Some(current_prefix);

match current_prefix {
RequestType::Deposit => {
requests.deposits = DepositRequests::<E>::from_ssz_bytes(request_bytes)
.map_err(|e| {
format!("Failed to decode WithdrawalRequest from EL: {:?}", e)
RequestsError::DecodeError(format!(
"Failed to decode DepositRequest from EL: {:?}",
e
))
})?;
}
Some(RequestPrefix::Consolidation) => {
RequestType::Withdrawal => {
requests.withdrawals = WithdrawalRequests::<E>::from_ssz_bytes(request_bytes)
.map_err(|e| {
RequestsError::DecodeError(format!(
"Failed to decode WithdrawalRequest from EL: {:?}",
e
))
})?;
}
RequestType::Consolidation => {
requests.consolidations =
ConsolidationRequests::<E>::from_ssz_bytes(&decoded_bytes).map_err(
|e| format!("Failed to decode ConsolidationRequest from EL: {:?}", e),
)?;
ConsolidationRequests::<E>::from_ssz_bytes(request_bytes).map_err(|e| {
RequestsError::DecodeError(format!(
"Failed to decode ConsolidationRequest from EL: {:?}",
e
))
})?;
}
None => return Err("Empty requests string".to_string()),
}
}
Ok(requests)
Expand Down Expand Up @@ -510,7 +543,9 @@ impl<E: EthSpec> TryFrom<JsonGetPayloadResponse<E>> for GetPayloadResponse<E> {
block_value: response.block_value,
blobs_bundle: response.blobs_bundle.into(),
should_override_builder: response.should_override_builder,
requests: response.execution_requests.try_into()?,
requests: response.execution_requests.try_into().map_err(|e| {
format!("Failed to convert json to execution requests : {:?}", e)
})?,
}))
}
JsonGetPayloadResponse::V5(response) => {
Expand All @@ -519,7 +554,9 @@ impl<E: EthSpec> TryFrom<JsonGetPayloadResponse<E>> for GetPayloadResponse<E> {
block_value: response.block_value,
blobs_bundle: response.blobs_bundle.into(),
should_override_builder: response.should_override_builder,
requests: response.execution_requests.try_into()?,
requests: response.execution_requests.try_into().map_err(|e| {
format!("Failed to convert json to execution requests {:?}", e)
})?,
}))
}
}
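The `json_structures.rs` change above replaces index-based `RequestPrefix` lookups with explicit framing: each element of the requests array is `request_type ++ request_data`, elements with empty `request_data` must be excluded, and elements must be ordered by `request_type` in strictly ascending order (the rules quoted in the diff come from EIP-7685). A std-only sketch of just those framing checks, assuming the hex decoding has already happened; the names and error variants here are illustrative, not Lighthouse's:

#[derive(Debug, Clone, Copy, PartialEq)]
enum RequestKind {
    Deposit = 0,
    Withdrawal = 1,
    Consolidation = 2,
}

impl RequestKind {
    fn from_u8(byte: u8) -> Option<Self> {
        match byte {
            0 => Some(Self::Deposit),
            1 => Some(Self::Withdrawal),
            2 => Some(Self::Consolidation),
            _ => None,
        }
    }
}

#[derive(Debug, PartialEq)]
enum FramingError {
    Empty(usize),
    InvalidPrefix(u8),
    InvalidOrdering,
}

// Split each element into (request_type, request_data), enforcing the rules
// quoted in the diff: non-empty request_data and strictly ascending types.
fn split_requests(raw: &[Vec<u8>]) -> Result<Vec<(RequestKind, &[u8])>, FramingError> {
    let mut out = Vec::new();
    let mut prev: Option<u8> = None;
    for (i, elem) in raw.iter().enumerate() {
        let Some((&prefix, data)) = elem.split_first() else {
            return Err(FramingError::Empty(i));
        };
        if data.is_empty() {
            return Err(FramingError::Empty(i));
        }
        let kind = RequestKind::from_u8(prefix).ok_or(FramingError::InvalidPrefix(prefix))?;
        if prev.is_some_and(|p| p >= prefix) {
            return Err(FramingError::InvalidOrdering);
        }
        prev = Some(prefix);
        out.push((kind, data));
    }
    Ok(out)
}

fn main() {
    // One deposit blob and one consolidation blob; withdrawals (type 1) are
    // simply absent rather than present with empty data.
    let ok = vec![vec![0u8, 0xab], vec![2u8, 0xef]];
    assert!(split_requests(&ok).is_ok());
    // Out-of-order request types are rejected.
    let bad = vec![vec![2u8, 0xef], vec![0u8, 0xab]];
    assert_eq!(split_requests(&bad).unwrap_err(), FramingError::InvalidOrdering);
}

Typed errors (`RequestsError` in the diff) replace the earlier stringly-typed `Result<_, String>`, with the conversion back to a `String` deferred to the `JsonGetPayloadResponse` call sites.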
@@ -128,6 +128,11 @@ impl<'block, E: EthSpec> NewPayloadRequest<'block, E> {

let _timer = metrics::start_timer(&metrics::EXECUTION_LAYER_VERIFY_BLOCK_HASH);

// Check that no transactions in the payload are zero length
if payload.transactions().iter().any(|slice| slice.is_empty()) {
return Err(Error::ZeroLengthTransaction);
}

let (header_hash, rlp_transactions_root) = calculate_execution_block_hash(
payload,
parent_beacon_block_root,
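The final rendered hunk, in `NewPayloadRequest`'s block-hash verification, adds an early rejection of zero-length transactions before the RLP transactions root is computed. Any RLP-encoded (or EIP-2718 typed) transaction is at least one byte, so an empty byte string can never be valid. A standalone sketch of the check with the surrounding types simplified (`ZeroLengthTransaction` matches the diff's error variant; the function name is illustrative):

#[derive(Debug, PartialEq)]
enum Error {
    ZeroLengthTransaction,
}

// A valid RLP-encoded transaction is at least one byte, so an empty slice
// can never be a real transaction and is rejected up front.
fn check_transactions(transactions: &[Vec<u8>]) -> Result<(), Error> {
    if transactions.iter().any(|tx| tx.is_empty()) {
        return Err(Error::ZeroLengthTransaction);
    }
    Ok(())
}

fn main() {
    assert_eq!(check_transactions(&[vec![0x02, 0xf8]]), Ok(()));
    assert_eq!(
        check_transactions(&[vec![0x02, 0xf8], vec![]]),
        Err(Error::ZeroLengthTransaction)
    );
}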