From ff739d56bea4ed2acd442d6a54f2c40bce01b9b8 Mon Sep 17 00:00:00 2001 From: Michael Sproul Date: Tue, 18 Feb 2025 11:39:49 +1100 Subject: [PATCH 01/10] Fix light client merkle proofs (#7007) Fix a regression introduced in this PR: - https://github.com/sigp/lighthouse/pull/6361 We were indexing into the `MerkleTree` with raw generalized indices, which was incorrect and triggering `debug_assert` failures, as described here: - https://github.com/sigp/lighthouse/issues/7005 - Convert `generalized_index` to the correct leaf index prior to proof generation. - Add sanity checks on indices used in `BeaconState::generate_proof`. - Remove debug asserts from `MerkleTree::generate_proof` in favour of actual errors. This would have caught the bug earlier. - Refactor the EF tests so that the merkle validity tests are actually run. They were misconfigured in a way that resulted in them running silently with 0 test cases, and the `check_all_files_accessed.py` script still had an ignore that covered the test files, so this omission wasn't detected. --- consensus/merkle_proof/src/lib.rs | 15 ++++- consensus/types/src/beacon_block_body.rs | 4 +- consensus/types/src/beacon_state.rs | 30 +++++++--- testing/ef_tests/check_all_files_accessed.py | 4 +- .../src/cases/merkle_proof_validity.rs | 57 +++++++++++++++---- testing/ef_tests/src/handler.rs | 34 ++--------- testing/ef_tests/tests/tests.rs | 10 +--- 7 files changed, 93 insertions(+), 61 deletions(-) diff --git a/consensus/merkle_proof/src/lib.rs b/consensus/merkle_proof/src/lib.rs index b01f3f4429f..271e676df1c 100644 --- a/consensus/merkle_proof/src/lib.rs +++ b/consensus/merkle_proof/src/lib.rs @@ -34,6 +34,8 @@ pub enum MerkleTree { pub enum MerkleTreeError { // Trying to push in a leaf LeafReached, + // Trying to generate a proof for a non-leaf node + NonLeafProof, // No more space in the MerkleTree MerkleTreeFull, // MerkleTree is invalid @@ -313,8 +315,17 @@ impl MerkleTree { current_depth -= 1; } - debug_assert_eq!(proof.len(), depth); - debug_assert!(current_node.is_leaf()); + if proof.len() != depth { + // This should be unreachable regardless of how the method is called, because we push + // one proof element for each layer of `depth`. + return Err(MerkleTreeError::PleaseNotifyTheDevs); + } + + // Generating a proof for a non-leaf node is invalid and indicates an error on the part of + // the caller. + if !current_node.is_leaf() { + return Err(MerkleTreeError::NonLeafProof); + } // Put proof in bottom-up order. proof.reverse(); diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 3f75790a357..410374c18f3 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -277,9 +277,9 @@ impl<'a, E: EthSpec, Payload: AbstractExecPayload> BeaconBlockBodyRef<'a, E, // https://github.com/ethereum/consensus-specs/blob/dev/specs/deneb/beacon-chain.md#beaconblockbody generalized_index .checked_sub(NUM_BEACON_BLOCK_BODY_HASH_TREE_ROOT_LEAVES) - .ok_or(Error::IndexNotSupported(generalized_index))? + .ok_or(Error::GeneralizedIndexNotSupported(generalized_index))? 
} - _ => return Err(Error::IndexNotSupported(generalized_index)), + _ => return Err(Error::GeneralizedIndexNotSupported(generalized_index)), }; let leaves = self.body_merkle_leaves(); diff --git a/consensus/types/src/beacon_state.rs b/consensus/types/src/beacon_state.rs index 157271b2272..4aed79898d3 100644 --- a/consensus/types/src/beacon_state.rs +++ b/consensus/types/src/beacon_state.rs @@ -157,6 +157,7 @@ pub enum Error { current_fork: ForkName, }, TotalActiveBalanceDiffUninitialized, + GeneralizedIndexNotSupported(usize), IndexNotSupported(usize), InvalidFlagIndex(usize), MerkleTreeError(merkle_proof::MerkleTreeError), @@ -2580,11 +2581,12 @@ impl BeaconState { // for the internal nodes. Result should be 22 or 23, the field offset of the committee // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate - let field_index = if self.fork_name_unchecked().electra_enabled() { + let field_gindex = if self.fork_name_unchecked().electra_enabled() { light_client_update::CURRENT_SYNC_COMMITTEE_INDEX_ELECTRA } else { light_client_update::CURRENT_SYNC_COMMITTEE_INDEX }; + let field_index = field_gindex.safe_sub(self.num_fields_pow2())?; let leaves = self.get_beacon_state_leaves(); self.generate_proof(field_index, &leaves) } @@ -2594,11 +2596,12 @@ impl BeaconState { // for the internal nodes. Result should be 22 or 23, the field offset of the committee // in the `BeaconState`: // https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#beaconstate - let field_index = if self.fork_name_unchecked().electra_enabled() { + let field_gindex = if self.fork_name_unchecked().electra_enabled() { light_client_update::NEXT_SYNC_COMMITTEE_INDEX_ELECTRA } else { light_client_update::NEXT_SYNC_COMMITTEE_INDEX }; + let field_index = field_gindex.safe_sub(self.num_fields_pow2())?; let leaves = self.get_beacon_state_leaves(); self.generate_proof(field_index, &leaves) } @@ -2606,17 +2609,24 @@ impl BeaconState { pub fn compute_finalized_root_proof(&self) -> Result, Error> { // Finalized root is the right child of `finalized_checkpoint`, divide by two to get // the generalized index of `state.finalized_checkpoint`. - let field_index = if self.fork_name_unchecked().electra_enabled() { - // Index should be 169/2 - 64 = 20 which matches the position - // of `finalized_checkpoint` in `BeaconState` + let checkpoint_root_gindex = if self.fork_name_unchecked().electra_enabled() { light_client_update::FINALIZED_ROOT_INDEX_ELECTRA } else { - // Index should be 105/2 - 32 = 20 which matches the position - // of `finalized_checkpoint` in `BeaconState` light_client_update::FINALIZED_ROOT_INDEX }; + let checkpoint_gindex = checkpoint_root_gindex / 2; + + // Convert gindex to index by subtracting 2**depth (gindex = 2**depth + index). + // + // After Electra, the index should be 169/2 - 64 = 20 which matches the position + // of `finalized_checkpoint` in `BeaconState`. + // + // Prior to Electra, the index should be 105/2 - 32 = 20 which matches the position + // of `finalized_checkpoint` in `BeaconState`. 
+ let checkpoint_index = checkpoint_gindex.safe_sub(self.num_fields_pow2())?; + let leaves = self.get_beacon_state_leaves(); - let mut proof = self.generate_proof(field_index, &leaves)?; + let mut proof = self.generate_proof(checkpoint_index, &leaves)?; proof.insert(0, self.finalized_checkpoint().epoch.tree_hash_root()); Ok(proof) } @@ -2626,6 +2636,10 @@ impl BeaconState { field_index: usize, leaves: &[Hash256], ) -> Result, Error> { + if field_index >= leaves.len() { + return Err(Error::IndexNotSupported(field_index)); + } + let depth = self.num_fields_pow2().ilog2() as usize; let tree = merkle_proof::MerkleTree::create(leaves, depth); let (_, proof) = tree.generate_proof(field_index, depth)?; diff --git a/testing/ef_tests/check_all_files_accessed.py b/testing/ef_tests/check_all_files_accessed.py index 8a662b72e35..4e744b797a5 100755 --- a/testing/ef_tests/check_all_files_accessed.py +++ b/testing/ef_tests/check_all_files_accessed.py @@ -27,10 +27,8 @@ "tests/.*/.*/ssz_static/PowBlock/", # We no longer implement merge logic. "tests/.*/bellatrix/fork_choice/on_merge_block", - # light_client - "tests/.*/.*/light_client/single_merkle_proof", + # Light client sync is not implemented "tests/.*/.*/light_client/sync", - "tests/.*/electra/light_client/update_ranking", # LightClientStore "tests/.*/.*/ssz_static/LightClientStore", # LightClientSnapshot diff --git a/testing/ef_tests/src/cases/merkle_proof_validity.rs b/testing/ef_tests/src/cases/merkle_proof_validity.rs index 109d2cc7969..711974dd43f 100644 --- a/testing/ef_tests/src/cases/merkle_proof_validity.rs +++ b/testing/ef_tests/src/cases/merkle_proof_validity.rs @@ -20,6 +20,12 @@ pub struct MerkleProof { pub branch: Vec, } +#[derive(Debug)] +pub enum GenericMerkleProofValidity { + BeaconState(BeaconStateMerkleProofValidity), + BeaconBlockBody(Box>), +} + #[derive(Debug, Clone, Deserialize)] #[serde(bound = "E: EthSpec")] pub struct BeaconStateMerkleProofValidity { @@ -28,6 +34,39 @@ pub struct BeaconStateMerkleProofValidity { pub merkle_proof: MerkleProof, } +impl LoadCase for GenericMerkleProofValidity { + fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { + let path_components = path.iter().collect::>(); + + // The "suite" name is the 2nd last directory in the path. 
+ assert!( + path_components.len() >= 2, + "path should have at least 2 components" + ); + let suite_name = path_components[path_components.len() - 2]; + + if suite_name == "BeaconState" { + BeaconStateMerkleProofValidity::load_from_dir(path, fork_name) + .map(GenericMerkleProofValidity::BeaconState) + } else if suite_name == "BeaconBlockBody" { + BeaconBlockBodyMerkleProofValidity::load_from_dir(path, fork_name) + .map(Box::new) + .map(GenericMerkleProofValidity::BeaconBlockBody) + } else { + panic!("unsupported type for merkle proof test: {:?}", suite_name) + } + } +} + +impl Case for GenericMerkleProofValidity { + fn result(&self, case_index: usize, fork_name: ForkName) -> Result<(), Error> { + match self { + Self::BeaconState(test) => test.result(case_index, fork_name), + Self::BeaconBlockBody(test) => test.result(case_index, fork_name), + } + } +} + impl LoadCase for BeaconStateMerkleProofValidity { fn load_from_dir(path: &Path, fork_name: ForkName) -> Result { let spec = &testing_spec::(fork_name); @@ -72,11 +111,9 @@ impl Case for BeaconStateMerkleProofValidity { } }; - let Ok(proof) = proof else { - return Err(Error::FailedToParseTest( - "Could not retrieve merkle proof".to_string(), - )); - }; + let proof = proof.map_err(|e| { + Error::FailedToParseTest(format!("Could not retrieve merkle proof: {e:?}")) + })?; let proof_len = proof.len(); let branch_len = self.merkle_proof.branch.len(); if proof_len != branch_len { @@ -273,11 +310,11 @@ impl Case for BeaconBlockBodyMerkleProofValidity { fn result(&self, _case_index: usize, _fork_name: ForkName) -> Result<(), Error> { let binding = self.block_body.clone(); let block_body = binding.to_ref(); - let Ok(proof) = block_body.block_body_merkle_proof(self.merkle_proof.leaf_index) else { - return Err(Error::FailedToParseTest( - "Could not retrieve merkle proof".to_string(), - )); - }; + let proof = block_body + .block_body_merkle_proof(self.merkle_proof.leaf_index) + .map_err(|e| { + Error::FailedToParseTest(format!("Could not retrieve merkle proof: {e:?}")) + })?; let proof_len = proof.len(); let branch_len = self.merkle_proof.branch.len(); if proof_len != branch_len { diff --git a/testing/ef_tests/src/handler.rs b/testing/ef_tests/src/handler.rs index 481c9b2169f..a3754982396 100644 --- a/testing/ef_tests/src/handler.rs +++ b/testing/ef_tests/src/handler.rs @@ -1000,30 +1000,6 @@ impl Handler for KZGRecoverCellsAndKZGProofHandler { } } -#[derive(Derivative)] -#[derivative(Default(bound = ""))] -pub struct BeaconStateMerkleProofValidityHandler(PhantomData); - -impl Handler for BeaconStateMerkleProofValidityHandler { - type Case = cases::BeaconStateMerkleProofValidity; - - fn config_name() -> &'static str { - E::name() - } - - fn runner_name() -> &'static str { - "light_client" - } - - fn handler_name(&self) -> String { - "single_merkle_proof/BeaconState".into() - } - - fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - fork_name.altair_enabled() - } -} - #[derive(Derivative)] #[derivative(Default(bound = ""))] pub struct KzgInclusionMerkleProofValidityHandler(PhantomData); @@ -1054,10 +1030,10 @@ impl Handler for KzgInclusionMerkleProofValidityHandler(PhantomData); +pub struct MerkleProofValidityHandler(PhantomData); -impl Handler for BeaconBlockBodyMerkleProofValidityHandler { - type Case = cases::BeaconBlockBodyMerkleProofValidity; +impl Handler for MerkleProofValidityHandler { + type Case = cases::GenericMerkleProofValidity; fn config_name() -> &'static str { E::name() @@ -1068,11 +1044,11 @@ impl Handler for 
BeaconBlockBodyMerkleProofValidityHandle } fn handler_name(&self) -> String { - "single_merkle_proof/BeaconBlockBody".into() + "single_merkle_proof".into() } fn is_enabled_for_fork(&self, fork_name: ForkName) -> bool { - fork_name.capella_enabled() + fork_name.altair_enabled() } } diff --git a/testing/ef_tests/tests/tests.rs b/testing/ef_tests/tests/tests.rs index 1f5a7dd9974..3948708edf5 100644 --- a/testing/ef_tests/tests/tests.rs +++ b/testing/ef_tests/tests/tests.rs @@ -955,13 +955,9 @@ fn kzg_recover_cells_and_proofs() { } #[test] -fn beacon_state_merkle_proof_validity() { - BeaconStateMerkleProofValidityHandler::::default().run(); -} - -#[test] -fn beacon_block_body_merkle_proof_validity() { - BeaconBlockBodyMerkleProofValidityHandler::::default().run(); +fn light_client_merkle_proof_validity() { + MerkleProofValidityHandler::::default().run(); + MerkleProofValidityHandler::::default().run(); } #[test] From b3b6aea1c54ebfa54aa8798c204375e2d2553cac Mon Sep 17 00:00:00 2001 From: Pawan Dhananjay Date: Sun, 23 Feb 2025 18:36:13 -0800 Subject: [PATCH 02/10] Rust 1.85 lints (#7019) N/A 2 changes: 1. Replace Option::map_or(true, ...) with is_none_or(...) 2. Remove unnecessary `Into::into` blocks where the type conversion is apparent from the types --- .github/workflows/test-suite.yml | 4 +- .../src/attestation_verification.rs | 28 +++--- beacon_node/beacon_chain/src/beacon_chain.rs | 6 +- .../beacon_chain/src/block_times_cache.rs | 4 +- .../overflow_lru_cache.rs | 1 - .../beacon_chain/src/shuffling_cache.rs | 2 +- .../beacon_chain/src/validator_monitor.rs | 2 +- beacon_node/client/src/notifier.rs | 2 +- .../test_utils/execution_block_generator.rs | 2 +- .../genesis/src/eth1_genesis_service.rs | 2 +- beacon_node/http_api/src/lib.rs | 8 +- beacon_node/http_api/src/produce_block.rs | 4 +- beacon_node/http_api/src/validators.rs | 6 +- beacon_node/http_api/tests/tests.rs | 4 +- .../src/peer_manager/network_behaviour.rs | 4 +- .../src/network_beacon_processor/mod.rs | 4 +- .../src/bls_to_execution_changes.rs | 2 +- beacon_node/operation_pool/src/lib.rs | 2 +- .../operation_pool/src/reward_cache.rs | 2 +- beacon_node/store/src/hot_cold_store.rs | 4 +- beacon_node/store/src/lib.rs | 2 - .../src/validator_definitions.rs | 1 - .../block_signature_verifier.rs | 1 - consensus/types/src/beacon_block_body.rs | 1 + consensus/types/src/runtime_var_list.rs | 10 +- slasher/src/database.rs | 2 +- .../web3signer_tests/src/get_web3signer.rs | 93 ++++++++----------- testing/web3signer_tests/src/lib.rs | 9 +- validator_client/http_api/src/lib.rs | 2 +- .../src/slashing_database.rs | 2 +- .../validator_services/src/duties_service.rs | 6 +- .../validator_services/src/sync.rs | 4 +- validator_client/validator_store/src/lib.rs | 4 +- 33 files changed, 102 insertions(+), 128 deletions(-) diff --git a/.github/workflows/test-suite.yml b/.github/workflows/test-suite.yml index 0f91c86617e..574f1eda791 100644 --- a/.github/workflows/test-suite.yml +++ b/.github/workflows/test-suite.yml @@ -19,7 +19,9 @@ env: # Disable debug info (see https://github.com/sigp/lighthouse/issues/4005) RUSTFLAGS: "-D warnings -C debuginfo=0" # Prevent Github API rate limiting. - LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + # NOTE: this token is a personal access token on Jimmy's account due to the default GITHUB_TOKEN + # not having access to other repositories. We should eventually devise a better solution here. + LIGHTHOUSE_GITHUB_TOKEN: ${{ secrets.LIGHTHOUSE_GITHUB_TOKEN }} # Enable self-hosted runners for the sigp repo only. 
SELF_HOSTED_RUNNERS: ${{ github.repository == 'sigp/lighthouse' }} # Self-hosted runners need to reference a different host for `./watch` tests. diff --git a/beacon_node/beacon_chain/src/attestation_verification.rs b/beacon_node/beacon_chain/src/attestation_verification.rs index a70a2caa4f3..00e86154876 100644 --- a/beacon_node/beacon_chain/src/attestation_verification.rs +++ b/beacon_node/beacon_chain/src/attestation_verification.rs @@ -1450,19 +1450,17 @@ where return Err(Error::UnknownTargetRoot(target.root)); } - chain - .with_committee_cache(target.root, attestation_epoch, |committee_cache, _| { - let committees_per_slot = committee_cache.committees_per_slot(); - - Ok(committee_cache - .get_beacon_committees_at_slot(attestation.data().slot) - .map(|committees| map_fn((committees, committees_per_slot))) - .unwrap_or_else(|_| { - Err(Error::NoCommitteeForSlotAndIndex { - slot: attestation.data().slot, - index: attestation.committee_index().unwrap_or(0), - }) - })) - }) - .map_err(BeaconChainError::from)? + chain.with_committee_cache(target.root, attestation_epoch, |committee_cache, _| { + let committees_per_slot = committee_cache.committees_per_slot(); + + Ok(committee_cache + .get_beacon_committees_at_slot(attestation.data().slot) + .map(|committees| map_fn((committees, committees_per_slot))) + .unwrap_or_else(|_| { + Err(Error::NoCommitteeForSlotAndIndex { + slot: attestation.data().slot, + index: attestation.committee_index().unwrap_or(0), + }) + })) + })? } diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index ca21b519f15..eb731f7d6ad 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -6506,9 +6506,9 @@ impl BeaconChain { /// Returns `true` if the given slot is prior to the `bellatrix_fork_epoch`. pub fn slot_is_prior_to_bellatrix(&self, slot: Slot) -> bool { - self.spec.bellatrix_fork_epoch.map_or(true, |bellatrix| { - slot.epoch(T::EthSpec::slots_per_epoch()) < bellatrix - }) + self.spec + .bellatrix_fork_epoch + .is_none_or(|bellatrix| slot.epoch(T::EthSpec::slots_per_epoch()) < bellatrix) } /// Returns the value of `execution_optimistic` for `block`. 
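For readers unfamiliar with the new method: `Option::is_none_or` (stable since Rust 1.82) is a drop-in replacement for the `map_or(true, ...)` idiom, returning `true` when the option is `None`. A minimal standalone sketch of the equivalence, modeled loosely on the `slot_is_prior_to_bellatrix` check above (simplified types, not the real Lighthouse ones):

```rust
fn main() {
    // No Bellatrix fork scheduled on this hypothetical network.
    let bellatrix_fork_epoch: Option<u64> = None;
    let epoch = 10;

    // Old idiom: `map_or(true, ...)` is vacuously true when the Option is None.
    // Clippy in Rust 1.85 flags this pattern.
    let before = bellatrix_fork_epoch.map_or(true, |fork| epoch < fork);

    // New idiom: `is_none_or` states the same intent directly.
    let after = bellatrix_fork_epoch.is_none_or(|fork| epoch < fork);

    assert_eq!(before, after);
    assert!(after); // with no fork scheduled, every epoch is "prior" to it
}
```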
diff --git a/beacon_node/beacon_chain/src/block_times_cache.rs b/beacon_node/beacon_chain/src/block_times_cache.rs index af122ccdc06..bd1adb7e407 100644 --- a/beacon_node/beacon_chain/src/block_times_cache.rs +++ b/beacon_node/beacon_chain/src/block_times_cache.rs @@ -173,7 +173,7 @@ impl BlockTimesCache { if block_times .timestamps .all_blobs_observed - .map_or(true, |prev| timestamp > prev) + .is_none_or(|prev| timestamp > prev) { block_times.timestamps.all_blobs_observed = Some(timestamp); } @@ -195,7 +195,7 @@ impl BlockTimesCache { .entry(block_root) .or_insert_with(|| BlockTimesCacheValue::new(slot)); let existing_timestamp = field(&mut block_times.timestamps); - if existing_timestamp.map_or(true, |prev| timestamp < prev) { + if existing_timestamp.is_none_or(|prev| timestamp < prev) { *existing_timestamp = Some(timestamp); } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index cd793c8394b..da6372a43aa 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -307,7 +307,6 @@ impl PendingComponents { .map(|b| b.map(|b| b.to_blob())) .take(num_blobs_expected) .collect::>>() - .map(Into::into) else { return Err(AvailabilityCheckError::Unexpected); }; diff --git a/beacon_node/beacon_chain/src/shuffling_cache.rs b/beacon_node/beacon_chain/src/shuffling_cache.rs index 67ca72254b6..dec73a763fe 100644 --- a/beacon_node/beacon_chain/src/shuffling_cache.rs +++ b/beacon_node/beacon_chain/src/shuffling_cache.rs @@ -138,7 +138,7 @@ impl ShufflingCache { .get(&key) // Replace the committee if it's not present or if it's a promise. A bird in the hand is // worth two in the promise-bush! - .map_or(true, CacheItem::is_promise) + .is_none_or(CacheItem::is_promise) { self.insert_cache_item( key, diff --git a/beacon_node/beacon_chain/src/validator_monitor.rs b/beacon_node/beacon_chain/src/validator_monitor.rs index f8a483c6214..cb27f0727a3 100644 --- a/beacon_node/beacon_chain/src/validator_monitor.rs +++ b/beacon_node/beacon_chain/src/validator_monitor.rs @@ -628,7 +628,7 @@ impl ValidatorMonitor { // the proposer shuffling cache lock when there are lots of missed blocks. if proposers_per_epoch .as_ref() - .map_or(true, |(_, cached_epoch)| *cached_epoch != slot_epoch) + .is_none_or(|(_, cached_epoch)| *cached_epoch != slot_epoch) { proposers_per_epoch = self .get_proposers_by_epoch_from_cache( diff --git a/beacon_node/client/src/notifier.rs b/beacon_node/client/src/notifier.rs index 0c3b1578d6e..c735d125380 100644 --- a/beacon_node/client/src/notifier.rs +++ b/beacon_node/client/src/notifier.rs @@ -187,7 +187,7 @@ pub fn spawn_notifier( let is_backfilling = matches!(current_sync_state, SyncState::BackFillSyncing { .. 
}); if is_backfilling && last_backfill_log_slot - .map_or(true, |slot| slot + BACKFILL_LOG_INTERVAL <= current_slot) + .is_none_or(|slot| slot + BACKFILL_LOG_INTERVAL <= current_slot) { last_backfill_log_slot = Some(current_slot); diff --git a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs index 9fa375b3757..5a5c9e1fa9a 100644 --- a/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs +++ b/beacon_node/execution_layer/src/test_utils/execution_block_generator.rs @@ -448,7 +448,7 @@ impl ExecutionBlockGenerator { if self .head_block .as_ref() - .map_or(true, |head| head.block_hash() == last_block_hash) + .is_none_or(|head| head.block_hash() == last_block_hash) { self.head_block = Some(block.clone()); } diff --git a/beacon_node/genesis/src/eth1_genesis_service.rs b/beacon_node/genesis/src/eth1_genesis_service.rs index b5f4bd50ee2..6e8f38627ca 100644 --- a/beacon_node/genesis/src/eth1_genesis_service.rs +++ b/beacon_node/genesis/src/eth1_genesis_service.rs @@ -263,7 +263,7 @@ impl Eth1GenesisService { // again later. if eth1_service .highest_safe_block() - .map_or(true, |n| block.number > n) + .is_none_or(|n| block.number > n) { continue; } diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs index d6431fe7293..5d75dc8c9a0 100644 --- a/beacon_node/http_api/src/lib.rs +++ b/beacon_node/http_api/src/lib.rs @@ -1939,10 +1939,10 @@ pub fn serve( query: api_types::AttestationPoolQuery| { task_spawner.blocking_response_task(Priority::P1, move || { let query_filter = |data: &AttestationData| { - query.slot.map_or(true, |slot| slot == data.slot) + query.slot.is_none_or(|slot| slot == data.slot) && query .committee_index - .map_or(true, |index| index == data.index) + .is_none_or(|index| index == data.index) }; let mut attestations = chain.op_pool.get_filtered_attestations(query_filter); @@ -3159,11 +3159,11 @@ pub fn serve( peer_info.connection_status(), ); - let state_matches = query.state.as_ref().map_or(true, |states| { + let state_matches = query.state.as_ref().is_none_or(|states| { states.iter().any(|state_param| *state_param == state) }); let direction_matches = - query.direction.as_ref().map_or(true, |directions| { + query.direction.as_ref().is_none_or(|directions| { directions.iter().any(|dir_param| *dir_param == direction) }); diff --git a/beacon_node/http_api/src/produce_block.rs b/beacon_node/http_api/src/produce_block.rs index 0e24e8f1758..22d6f0e7ae1 100644 --- a/beacon_node/http_api/src/produce_block.rs +++ b/beacon_node/http_api/src/produce_block.rs @@ -147,7 +147,7 @@ pub async fn produce_blinded_block_v2( .produce_block_with_verification( randao_reveal, slot, - query.graffiti.map(Into::into), + query.graffiti, randao_verification, None, BlockProductionVersion::BlindedV2, @@ -178,7 +178,7 @@ pub async fn produce_block_v2( .produce_block_with_verification( randao_reveal, slot, - query.graffiti.map(Into::into), + query.graffiti, randao_verification, None, BlockProductionVersion::FullV2, diff --git a/beacon_node/http_api/src/validators.rs b/beacon_node/http_api/src/validators.rs index 93e63953ef7..f3d78e6fcd5 100644 --- a/beacon_node/http_api/src/validators.rs +++ b/beacon_node/http_api/src/validators.rs @@ -29,7 +29,7 @@ pub fn get_beacon_state_validators( .enumerate() // filter by validator id(s) if provided .filter(|(index, (validator, _))| { - ids_filter_set.as_ref().map_or(true, |ids_set| { + ids_filter_set.as_ref().is_none_or(|ids_set| { 
ids_set.contains(&ValidatorId::PublicKey(validator.pubkey)) || ids_set.contains(&ValidatorId::Index(*index as u64)) }) @@ -42,7 +42,7 @@ pub fn get_beacon_state_validators( far_future_epoch, ); - let status_matches = query_statuses.as_ref().map_or(true, |statuses| { + let status_matches = query_statuses.as_ref().is_none_or(|statuses| { statuses.contains(&status) || statuses.contains(&status.superstatus()) }); @@ -92,7 +92,7 @@ pub fn get_beacon_state_validator_balances( .enumerate() // filter by validator id(s) if provided .filter(|(index, (validator, _))| { - ids_filter_set.as_ref().map_or(true, |ids_set| { + ids_filter_set.as_ref().is_none_or(|ids_set| { ids_set.contains(&ValidatorId::PublicKey(validator.pubkey)) || ids_set.contains(&ValidatorId::Index(*index as u64)) }) diff --git a/beacon_node/http_api/tests/tests.rs b/beacon_node/http_api/tests/tests.rs index 556ddebad3b..f7dbedc9ca9 100644 --- a/beacon_node/http_api/tests/tests.rs +++ b/beacon_node/http_api/tests/tests.rs @@ -2450,8 +2450,8 @@ impl ApiTester { }; let state_match = - states.map_or(true, |states| states.contains(&PeerState::Connected)); - let dir_match = dirs.map_or(true, |dirs| dirs.contains(&PeerDirection::Inbound)); + states.is_none_or(|states| states.contains(&PeerState::Connected)); + let dir_match = dirs.is_none_or(|dirs| dirs.contains(&PeerDirection::Inbound)); let mut expected_peers = Vec::new(); if state_match && dir_match { diff --git a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs index abafb200bef..ee2a7461428 100644 --- a/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs +++ b/beacon_node/lighthouse_network/src/peer_manager/network_behaviour.rs @@ -201,7 +201,7 @@ impl NetworkBehaviour for PeerManager { .peers .read() .peer_info(&peer_id) - .map_or(true, |peer| !peer.has_future_duty()) + .is_none_or(|peer| !peer.has_future_duty()) { return Err(ConnectionDenied::new( "Connection to peer rejected: too many connections", @@ -240,7 +240,7 @@ impl NetworkBehaviour for PeerManager { .peers .read() .peer_info(&peer_id) - .map_or(true, |peer| !peer.has_future_duty()) + .is_none_or(|peer| !peer.has_future_duty()) { return Err(ConnectionDenied::new( "Connection to peer rejected: too many connections", diff --git a/beacon_node/network/src/network_beacon_processor/mod.rs b/beacon_node/network/src/network_beacon_processor/mod.rs index c06a1f6ee37..13c7df8095b 100644 --- a/beacon_node/network/src/network_beacon_processor/mod.rs +++ b/beacon_node/network/src/network_beacon_processor/mod.rs @@ -79,9 +79,7 @@ const BLOB_PUBLICATION_EXP_FACTOR: usize = 2; impl NetworkBeaconProcessor { fn try_send(&self, event: BeaconWorkEvent) -> Result<(), Error> { - self.beacon_processor_send - .try_send(event) - .map_err(Into::into) + self.beacon_processor_send.try_send(event) } /// Create a new `Work` event for some `SingleAttestation`. 
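The second class of changes removes identity conversions like the `map_err(Into::into)` just dropped from `try_send`. A self-contained sketch of why such calls are redundant (the `SendError` type here is a stand-in, not a Lighthouse type): when the source and target error types already match, `Into::into` resolves to the blanket `impl From<T> for T` and converts nothing, which is the kind of pattern the `clippy::useless_conversion` lint targets.

```rust
#[derive(Debug)]
struct SendError(String);

fn try_send(ok: bool) -> Result<(), SendError> {
    if ok {
        Ok(())
    } else {
        Err(SendError("queue at capacity".to_string()))
    }
}

// Before: the error type already equals the return type, so the
// `map_err` performs an identity conversion.
#[allow(clippy::useless_conversion)]
fn forward_verbose(ok: bool) -> Result<(), SendError> {
    try_send(ok).map_err(Into::into)
}

// After: return the result directly.
fn forward(ok: bool) -> Result<(), SendError> {
    try_send(ok)
}

fn main() {
    assert!(forward_verbose(true).is_ok());
    assert!(forward(false).is_err());
}
```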
diff --git a/beacon_node/operation_pool/src/bls_to_execution_changes.rs b/beacon_node/operation_pool/src/bls_to_execution_changes.rs index cbab97e7199..b36299b51a0 100644 --- a/beacon_node/operation_pool/src/bls_to_execution_changes.rs +++ b/beacon_node/operation_pool/src/bls_to_execution_changes.rs @@ -112,7 +112,7 @@ impl BlsToExecutionChanges { head_state .validators() .get(validator_index as usize) - .map_or(true, |validator| { + .is_none_or(|validator| { let prune = validator.has_execution_withdrawal_credential(spec) && head_block .message() diff --git a/beacon_node/operation_pool/src/lib.rs b/beacon_node/operation_pool/src/lib.rs index 835133a0598..584a5f9f323 100644 --- a/beacon_node/operation_pool/src/lib.rs +++ b/beacon_node/operation_pool/src/lib.rs @@ -767,7 +767,7 @@ fn prune_validator_hash_map( && head_state .validators() .get(validator_index as usize) - .map_or(true, |validator| !prune_if(validator_index, validator)) + .is_none_or(|validator| !prune_if(validator_index, validator)) }); } diff --git a/beacon_node/operation_pool/src/reward_cache.rs b/beacon_node/operation_pool/src/reward_cache.rs index dd9902353f8..adedcb5e39e 100644 --- a/beacon_node/operation_pool/src/reward_cache.rs +++ b/beacon_node/operation_pool/src/reward_cache.rs @@ -83,7 +83,7 @@ impl RewardCache { if self .initialization .as_ref() - .map_or(true, |init| *init != new_init) + .is_none_or(|init| *init != new_init) { self.update_previous_epoch_participation(state) .map_err(OpPoolError::RewardCacheUpdatePrevEpoch)?; diff --git a/beacon_node/store/src/hot_cold_store.rs b/beacon_node/store/src/hot_cold_store.rs index e4a857b7994..6dee0dc180a 100644 --- a/beacon_node/store/src/hot_cold_store.rs +++ b/beacon_node/store/src/hot_cold_store.rs @@ -903,7 +903,7 @@ impl, Cold: ItemStore> HotColdDB state_root: &Hash256, summary: HotStateSummary, ) -> Result<(), Error> { - self.hot_db.put(state_root, &summary).map_err(Into::into) + self.hot_db.put(state_root, &summary) } /// Store a state in the store. @@ -1248,7 +1248,7 @@ impl, Cold: ItemStore> HotColdDB state_root.as_slice().to_vec(), )); - if slot.map_or(true, |slot| slot % E::slots_per_epoch() == 0) { + if slot.is_none_or(|slot| slot % E::slots_per_epoch() == 0) { key_value_batch.push(KeyValueStoreOp::DeleteKey( DBColumn::BeaconState, state_root.as_slice().to_vec(), diff --git a/beacon_node/store/src/lib.rs b/beacon_node/store/src/lib.rs index 0cfc42ab156..2b5be034894 100644 --- a/beacon_node/store/src/lib.rs +++ b/beacon_node/store/src/lib.rs @@ -195,7 +195,6 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati let key = key.as_slice(); self.put_bytes(column, key, &item.as_store_bytes()) - .map_err(Into::into) } fn put_sync(&self, key: &Hash256, item: &I) -> Result<(), Error> { @@ -203,7 +202,6 @@ pub trait ItemStore: KeyValueStore + Sync + Send + Sized + 'stati let key = key.as_slice(); self.put_bytes_sync(column, key, &item.as_store_bytes()) - .map_err(Into::into) } /// Retrieve an item from `Self`. diff --git a/common/account_utils/src/validator_definitions.rs b/common/account_utils/src/validator_definitions.rs index 7337d6dfb40..25cf368c900 100644 --- a/common/account_utils/src/validator_definitions.rs +++ b/common/account_utils/src/validator_definitions.rs @@ -115,7 +115,6 @@ impl SigningDefinition { voting_keystore_password_path: Some(path), .. } => read_password_string(path) - .map(Into::into) .map(Option::Some) .map_err(Error::UnableToReadKeystorePassword), SigningDefinition::LocalKeystore { .. 
} => Err(Error::KeystoreWithoutPassword), diff --git a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs index 24cb51d7557..8d4a5441967 100644 --- a/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs +++ b/consensus/state_processing/src/per_block_processing/block_signature_verifier.rs @@ -293,7 +293,6 @@ where )?); Ok(()) }) - .map_err(Error::into) } /// Includes all signatures in `self.block.body.voluntary_exits` for verification. diff --git a/consensus/types/src/beacon_block_body.rs b/consensus/types/src/beacon_block_body.rs index 410374c18f3..10c1a11edec 100644 --- a/consensus/types/src/beacon_block_body.rs +++ b/consensus/types/src/beacon_block_body.rs @@ -971,6 +971,7 @@ impl From>> Option>, ) { + #[allow(clippy::useless_conversion)] // Not a useless conversion fn from(body: BeaconBlockBody>) -> Self { map_beacon_block_body!(body, |inner, cons| { let (block, payload) = inner.into(); diff --git a/consensus/types/src/runtime_var_list.rs b/consensus/types/src/runtime_var_list.rs index 857073b3b84..d6b1c10e993 100644 --- a/consensus/types/src/runtime_var_list.rs +++ b/consensus/types/src/runtime_var_list.rs @@ -134,13 +134,13 @@ impl RuntimeVariableList { ))); } - bytes - .chunks(::ssz_fixed_len()) - .try_fold(Vec::with_capacity(num_items), |mut vec, chunk| { + bytes.chunks(::ssz_fixed_len()).try_fold( + Vec::with_capacity(num_items), + |mut vec, chunk| { vec.push(::from_ssz_bytes(chunk)?); Ok(vec) - }) - .map(Into::into)? + }, + )? } else { ssz::decode_list_of_variable_length_items(bytes, Some(max_len))? }; diff --git a/slasher/src/database.rs b/slasher/src/database.rs index e2b49dca297..9e5e827034a 100644 --- a/slasher/src/database.rs +++ b/slasher/src/database.rs @@ -665,7 +665,7 @@ impl SlasherDB { target: Epoch, prev_max_target: Option, ) -> Result, Error> { - if prev_max_target.map_or(true, |prev_max| target > prev_max) { + if prev_max_target.is_none_or(|prev_max| target > prev_max) { return Ok(None); } diff --git a/testing/web3signer_tests/src/get_web3signer.rs b/testing/web3signer_tests/src/get_web3signer.rs index 800feb204ae..8c46a07a7dd 100644 --- a/testing/web3signer_tests/src/get_web3signer.rs +++ b/testing/web3signer_tests/src/get_web3signer.rs @@ -1,65 +1,33 @@ //! This build script downloads the latest Web3Signer release and places it in the `OUT_DIR` so it //! can be used for integration testing. -use reqwest::{ - header::{self, HeaderValue}, - Client, -}; -use serde_json::Value; +use reqwest::Client; use std::env; use std::fs; -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use zip::ZipArchive; /// Use `None` to download the latest Github release. /// Use `Some("21.8.1")` to download a specific version. const FIXED_VERSION_STRING: Option<&str> = None; -pub async fn download_binary(dest_dir: PathBuf, github_token: &str) { - let version_file = dest_dir.join("version"); - - let client = Client::builder() - // Github gives a 403 without a user agent. - .user_agent("web3signer_tests") - .build() - .unwrap(); - +// This function no longer makes any attempt to avoid downloads, because in practice we use it +// with a fresh temp directory every time we run the tests. We might want to change this in future +// to enable reproducible/offline testing. 
+pub async fn download_binary(dest_dir: PathBuf) {
     let version = if let Some(version) = FIXED_VERSION_STRING {
         version.to_string()
     } else if let Ok(env_version) = env::var("LIGHTHOUSE_WEB3SIGNER_VERSION") {
         env_version
     } else {
-        // Get the latest release of the web3 signer repo.
-        let mut token_header_value = HeaderValue::from_str(github_token).unwrap();
-        token_header_value.set_sensitive(true);
-        let latest_response: Value = client
-            .get("https://api.github.com/repos/ConsenSys/web3signer/releases/latest")
-            .header(header::AUTHORIZATION, token_header_value)
-            .send()
-            .await
-            .unwrap()
-            .error_for_status()
-            .unwrap()
-            .json()
-            .await
-            .unwrap();
-        latest_response
-            .get("tag_name")
-            .unwrap()
-            .as_str()
-            .unwrap()
-            .to_string()
+        // The Consensys artifact server resolves `latest` to the latest release. We previously hit
+        // the Github API to establish the version, but that is no longer necessary.
+        "latest".to_string()
     };
+    eprintln!("Downloading web3signer version: {version}");

-    if version_file.exists() && fs::read(&version_file).unwrap() == version.as_bytes() {
-        // The latest version is already downloaded, do nothing.
-        return;
-    } else {
-        // Ignore the result since we don't care if the version file already exists.
-        let _ = fs::remove_file(&version_file);
-    }
-
-    // Download the latest release zip.
+    // Download the release zip.
+    let client = Client::builder().build().unwrap();
     let zip_url = format!("https://artifacts.consensys.net/public/web3signer/raw/names/web3signer.zip/versions/{}/web3signer-{}.zip", version, version);
     let zip_response = client
         .get(zip_url)
@@ -73,8 +41,9 @@
         .unwrap();

     // Write the zip to a file.
-    let zip_path = dest_dir.join(format!("{}.zip", version));
+    let zip_path = dest_dir.join(format!("web3signer-{version}.zip"));
     fs::write(&zip_path, zip_response).unwrap();
+
     // Unzip the zip.
     let mut zip_file = fs::File::open(&zip_path).unwrap();
     ZipArchive::new(&mut zip_file)
@@ -88,15 +57,33 @@
     if web3signer_dir.exists() {
         fs::remove_dir_all(&web3signer_dir).unwrap();
     }
-    fs::rename(
-        dest_dir.join(format!("web3signer-{}", version)),
-        web3signer_dir,
-    )
-    .unwrap();
-    // Delete zip and unzipped dir.
+    let versioned_web3signer_dir = find_versioned_web3signer_dir(&dest_dir);
+    eprintln!(
+        "Renaming versioned web3signer dir at: {}",
+        versioned_web3signer_dir.display()
+    );
+
+    fs::rename(versioned_web3signer_dir, web3signer_dir).unwrap();
+
+    // Delete zip.
     fs::remove_file(&zip_path).unwrap();
+}
+
+fn find_versioned_web3signer_dir(dest_dir: &Path) -> PathBuf {
+    for entry in fs::read_dir(dest_dir).unwrap() {
+        let entry = entry.unwrap();
+        let path = entry.path();
-    // Update the version file to avoid duplicate downloads.
- fs::write(&version_file, version).unwrap(); + if path + .file_name() + .and_then(|n| n.to_str()) + .map(|s| s.starts_with("web3signer-")) + .unwrap_or(false) + && entry.file_type().unwrap().is_dir() + { + return path; + } + } + panic!("no directory named web3signer-* found after ZIP extraction") } diff --git a/testing/web3signer_tests/src/lib.rs b/testing/web3signer_tests/src/lib.rs index e0dee9ceb4b..659495b2b3c 100644 --- a/testing/web3signer_tests/src/lib.rs +++ b/testing/web3signer_tests/src/lib.rs @@ -178,14 +178,7 @@ mod tests { pub async fn new(network: &str, listen_address: &str, listen_port: u16) -> Self { GET_WEB3SIGNER_BIN .get_or_init(|| async { - // Read a Github API token from the environment. This is intended to prevent rate-limits on CI. - // We use a name that is unlikely to accidentally collide with anything the user has configured. - let github_token = env::var("LIGHTHOUSE_GITHUB_TOKEN"); - download_binary( - TEMP_DIR.lock().path().to_path_buf(), - github_token.as_deref().unwrap_or(""), - ) - .await; + download_binary(TEMP_DIR.lock().path().to_path_buf()).await; }) .await; diff --git a/validator_client/http_api/src/lib.rs b/validator_client/http_api/src/lib.rs index 9c3e3da63d1..ae50b6a927a 100644 --- a/validator_client/http_api/src/lib.rs +++ b/validator_client/http_api/src/lib.rs @@ -767,7 +767,7 @@ pub fn serve( // Disabling an already disabled validator *with no other changes* is a // no-op. (Some(false), None) - if body.enabled.map_or(true, |enabled| !enabled) + if body.enabled.is_none_or(|enabled| !enabled) && body.gas_limit.is_none() && body.builder_boost_factor.is_none() && body.builder_proposals.is_none() diff --git a/validator_client/slashing_protection/src/slashing_database.rs b/validator_client/slashing_protection/src/slashing_database.rs index 71611339f94..f4c844d3140 100644 --- a/validator_client/slashing_protection/src/slashing_database.rs +++ b/validator_client/slashing_protection/src/slashing_database.rs @@ -1113,7 +1113,7 @@ fn max_or(opt_x: Option, y: T) -> T { /// /// If prev is `None` and `new` is `Some` then `true` is returned. fn monotonic(new: Option, prev: Option) -> bool { - new.is_some_and(|new_val| prev.map_or(true, |prev_val| new_val >= prev_val)) + new.is_some_and(|new_val| prev.is_none_or(|prev_val| new_val >= prev_val)) } /// The result of importing a single entry from an interchange file. 
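The `monotonic` helper above reads nicely after the change: `is_some_and` and `is_none_or` compose into "the new value exists, and is no smaller than the previous one if there was a previous one". A sketch of its truth table, specialising the generic helper to `u64` for illustration:

```rust
/// Mirrors the slashing-protection `monotonic` helper shown above,
/// pinned to u64 for illustration.
fn monotonic(new: Option<u64>, prev: Option<u64>) -> bool {
    new.is_some_and(|new_val| prev.is_none_or(|prev_val| new_val >= prev_val))
}

fn main() {
    assert!(monotonic(Some(5), None)); // first value ever seen: accepted
    assert!(monotonic(Some(5), Some(5))); // equal to the previous max: accepted
    assert!(monotonic(Some(6), Some(5))); // strictly increasing: accepted
    assert!(!monotonic(Some(4), Some(5))); // going backwards: rejected
    assert!(!monotonic(None, Some(5))); // absent new value is never monotonic
    assert!(!monotonic(None, None));
}
```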
diff --git a/validator_client/validator_services/src/duties_service.rs b/validator_client/validator_services/src/duties_service.rs
index 187eb4feb50..1c0fd338d27 100644
--- a/validator_client/validator_services/src/duties_service.rs
+++ b/validator_client/validator_services/src/duties_service.rs
@@ -843,10 +843,10 @@ async fn poll_beacon_attesters_for_epoch(
     local_pubkeys
         .iter()
         .filter(|pubkey| {
-            attesters.get(pubkey).map_or(true, |duties| {
+            attesters.get(pubkey).is_none_or(|duties| {
                 duties
                     .get(&epoch)
-                    .map_or(true, |(prior, _)| *prior != dependent_root)
+                    .is_none_or(|(prior, _)| *prior != dependent_root)
             })
         })
         .collect::<Vec<_>>()
@@ -974,7 +974,7 @@ fn get_uninitialized_validators(
         .filter(|pubkey| {
             attesters
                 .get(pubkey)
-                .map_or(true, |duties| !duties.contains_key(epoch))
+                .is_none_or(|duties| !duties.contains_key(epoch))
         })
         .filter_map(|pubkey| duties_service.validator_store.validator_index(pubkey))
         .collect::<Vec<_>>()
diff --git a/validator_client/validator_services/src/sync.rs b/validator_client/validator_services/src/sync.rs
index dd3e05088e5..6c983b54307 100644
--- a/validator_client/validator_services/src/sync.rs
+++ b/validator_client/validator_services/src/sync.rs
@@ -297,7 +297,7 @@ pub async fn poll_sync_committee_duties(
     // If the Altair fork is yet to be activated, do not attempt to poll for duties.
     if spec
         .altair_fork_epoch
-        .map_or(true, |altair_epoch| current_epoch < altair_epoch)
+        .is_none_or(|altair_epoch| current_epoch < altair_epoch)
     {
         return Ok(());
     }
@@ -474,7 +474,7 @@ pub async fn poll_sync_committee_duties_for_period ValidatorStore {
     let mut validator_def = ValidatorDefinition::new_keystore_with_password(
         voting_keystore_path,
         password_storage,
-        graffiti.map(Into::into),
+        graffiti,
         suggested_fee_recipient,
         gas_limit,
         builder_proposals,
@@ -327,7 +327,7 @@ impl ValidatorStore {
         .as_ref()
         // If there's no doppelganger service then we assume it is purposefully disabled and
         // declare that all keys are safe with regard to it.
-        .map_or(true, |doppelganger_service| {
+        .is_none_or(|doppelganger_service| {
             doppelganger_service
                 .validator_status(validator_pubkey)
                 .only_safe()

From 522b3cbaab7ae5d4d3554768a90d83ce3fec48ae Mon Sep 17 00:00:00 2001
From: Pawan Dhananjay
Date: Sun, 23 Feb 2025 19:39:13 -0800
Subject: [PATCH 03/10] Fix builder API headers (#7009)

Resolves https://github.com/sigp/lighthouse/issues/7000

Set the accept header on builder to the correct value when requesting ssz. This PR also adds a
flag to disable ssz over the builder api altogether. In the case that builders/relays have an ssz
bug, we can react quickly by asking clients to restart their nodes with the
`--builder-disable-ssz` flag to force json.

I'm not fully convinced this is useful, so I'm open to removing it or moving it to a separate PR.

Testing this currently.
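The fix boils down to content negotiation on the `Accept` header: advertise SSZ as preferred with JSON as a fallback using RFC 9110 quality values, unless SSZ has been explicitly disabled. A simplified sketch of the header construction (mirroring the `PREFERENCE_ACCEPT_VALUE` and `JSON_ACCEPT_VALUE` constants introduced in the diff below, not the full `BuilderHttpClient`):

```rust
use reqwest::header::{HeaderMap, HeaderValue, ACCEPT};

// Prefer SSZ (octet-stream) but accept JSON as a fallback, per RFC 9110
// quality values. These mirror the constants added in the diff below.
const PREFERENCE_ACCEPT_VALUE: &str = "application/octet-stream;q=1.0,application/json;q=0.9";
const JSON_ACCEPT_VALUE: &str = "application/json";

fn accept_headers(disable_ssz: bool) -> HeaderMap {
    let mut headers = HeaderMap::new();
    let accept = if disable_ssz {
        // JSON only: never ask the builder for an SSZ response.
        JSON_ACCEPT_VALUE
    } else {
        PREFERENCE_ACCEPT_VALUE
    };
    headers.insert(ACCEPT, HeaderValue::from_static(accept));
    headers
}

fn main() {
    let headers = accept_headers(false);
    assert_eq!(
        headers.get(ACCEPT).unwrap().to_str().unwrap(),
        PREFERENCE_ACCEPT_VALUE
    );
}
```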
--- beacon_node/beacon_chain/src/test_utils.rs | 1 + beacon_node/builder_client/src/lib.rs | 106 ++++++++++++++++----- beacon_node/execution_layer/src/lib.rs | 22 ++++- beacon_node/src/cli.rs | 9 ++ beacon_node/src/config.rs | 2 + book/src/help_bn.md | 2 + lighthouse/tests/beacon_node.rs | 34 +++++++ 7 files changed, 149 insertions(+), 27 deletions(-) diff --git a/beacon_node/beacon_chain/src/test_utils.rs b/beacon_node/beacon_chain/src/test_utils.rs index 8c9e3929f6b..24c85b3e070 100644 --- a/beacon_node/beacon_chain/src/test_utils.rs +++ b/beacon_node/beacon_chain/src/test_utils.rs @@ -779,6 +779,7 @@ where SensitiveUrl::parse(format!("http://127.0.0.1:{port}").as_str()).unwrap(), None, None, + false, ) .unwrap(); diff --git a/beacon_node/builder_client/src/lib.rs b/beacon_node/builder_client/src/lib.rs index 5f64ac7e43c..6d82542cefc 100644 --- a/beacon_node/builder_client/src/lib.rs +++ b/beacon_node/builder_client/src/lib.rs @@ -29,6 +29,11 @@ pub const DEFAULT_GET_HEADER_TIMEOUT_MILLIS: u64 = 1000; /// Default user agent for HTTP requests. pub const DEFAULT_USER_AGENT: &str = lighthouse_version::VERSION; +/// The value we set on the `ACCEPT` http header to indicate a preference for ssz response. +pub const PREFERENCE_ACCEPT_VALUE: &str = "application/octet-stream;q=1.0,application/json;q=0.9"; +/// Only accept json responses. +pub const JSON_ACCEPT_VALUE: &str = "application/json"; + #[derive(Clone)] pub struct Timeouts { get_header: Duration, @@ -57,7 +62,12 @@ pub struct BuilderHttpClient { server: SensitiveUrl, timeouts: Timeouts, user_agent: String, - ssz_enabled: Arc, + /// Only use json for all requests/responses types. + disable_ssz: bool, + /// Indicates that the `get_header` response had content-type ssz + /// so we can set content-type header to ssz to make the `submit_blinded_blocks` + /// request. + ssz_available: Arc, } impl BuilderHttpClient { @@ -65,6 +75,7 @@ impl BuilderHttpClient { server: SensitiveUrl, user_agent: Option, builder_header_timeout: Option, + disable_ssz: bool, ) -> Result { let user_agent = user_agent.unwrap_or(DEFAULT_USER_AGENT.to_string()); let client = reqwest::Client::builder().user_agent(&user_agent).build()?; @@ -73,7 +84,8 @@ impl BuilderHttpClient { server, timeouts: Timeouts::new(builder_header_timeout), user_agent, - ssz_enabled: Arc::new(false.into()), + disable_ssz, + ssz_available: Arc::new(false.into()), }) } @@ -124,7 +136,7 @@ impl BuilderHttpClient { let Ok(Some(fork_name)) = self.fork_name_from_header(&headers) else { // if no fork version specified, attempt to fallback to JSON - self.ssz_enabled.store(false, Ordering::SeqCst); + self.ssz_available.store(false, Ordering::SeqCst); return serde_json::from_slice(&response_bytes).map_err(Error::InvalidJson); }; @@ -132,7 +144,7 @@ impl BuilderHttpClient { match content_type { ContentType::Ssz => { - self.ssz_enabled.store(true, Ordering::SeqCst); + self.ssz_available.store(true, Ordering::SeqCst); T::from_ssz_bytes_by_fork(&response_bytes, fork_name) .map(|data| ForkVersionedResponse { version: Some(fork_name), @@ -142,15 +154,17 @@ impl BuilderHttpClient { .map_err(Error::InvalidSsz) } ContentType::Json => { - self.ssz_enabled.store(false, Ordering::SeqCst); + self.ssz_available.store(false, Ordering::SeqCst); serde_json::from_slice(&response_bytes).map_err(Error::InvalidJson) } } } /// Return `true` if the most recently received response from the builder had SSZ Content-Type. 
- pub fn is_ssz_enabled(&self) -> bool { - self.ssz_enabled.load(Ordering::SeqCst) + /// Return `false` otherwise. + /// Also returns `false` if we have explicitly disabled ssz. + pub fn is_ssz_available(&self) -> bool { + !self.disable_ssz && self.ssz_available.load(Ordering::SeqCst) } async fn get_with_timeout( @@ -213,7 +227,7 @@ impl BuilderHttpClient { &self, url: U, ssz_body: Vec, - mut headers: HeaderMap, + headers: HeaderMap, timeout: Option, ) -> Result { let mut builder = self.client.post(url); @@ -221,11 +235,6 @@ impl BuilderHttpClient { builder = builder.timeout(timeout); } - headers.insert( - CONTENT_TYPE_HEADER, - HeaderValue::from_static(SSZ_CONTENT_TYPE_HEADER), - ); - let response = builder .headers(headers) .body(ssz_body) @@ -292,9 +301,21 @@ impl BuilderHttpClient { .push("blinded_blocks"); let mut headers = HeaderMap::new(); - if let Ok(value) = HeaderValue::from_str(&blinded_block.fork_name_unchecked().to_string()) { - headers.insert(CONSENSUS_VERSION_HEADER, value); - } + headers.insert( + CONSENSUS_VERSION_HEADER, + HeaderValue::from_str(&blinded_block.fork_name_unchecked().to_string()) + .map_err(|e| Error::InvalidHeaders(format!("{}", e)))?, + ); + headers.insert( + CONTENT_TYPE_HEADER, + HeaderValue::from_str(SSZ_CONTENT_TYPE_HEADER) + .map_err(|e| Error::InvalidHeaders(format!("{}", e)))?, + ); + headers.insert( + ACCEPT, + HeaderValue::from_str(PREFERENCE_ACCEPT_VALUE) + .map_err(|e| Error::InvalidHeaders(format!("{}", e)))?, + ); let result = self .post_ssz_with_raw_response( @@ -326,9 +347,21 @@ impl BuilderHttpClient { .push("blinded_blocks"); let mut headers = HeaderMap::new(); - if let Ok(value) = HeaderValue::from_str(&blinded_block.fork_name_unchecked().to_string()) { - headers.insert(CONSENSUS_VERSION_HEADER, value); - } + headers.insert( + CONSENSUS_VERSION_HEADER, + HeaderValue::from_str(&blinded_block.fork_name_unchecked().to_string()) + .map_err(|e| Error::InvalidHeaders(format!("{}", e)))?, + ); + headers.insert( + CONTENT_TYPE_HEADER, + HeaderValue::from_str(JSON_CONTENT_TYPE_HEADER) + .map_err(|e| Error::InvalidHeaders(format!("{}", e)))?, + ); + headers.insert( + ACCEPT, + HeaderValue::from_str(JSON_ACCEPT_VALUE) + .map_err(|e| Error::InvalidHeaders(format!("{}", e)))?, + ); Ok(self .post_with_raw_response( @@ -362,12 +395,20 @@ impl BuilderHttpClient { .push(pubkey.as_hex_string().as_str()); let mut headers = HeaderMap::new(); - if let Ok(ssz_content_type_header) = HeaderValue::from_str(&format!( - "{}; q=1.0,{}; q=0.9", - SSZ_CONTENT_TYPE_HEADER, JSON_CONTENT_TYPE_HEADER - )) { - headers.insert(ACCEPT, ssz_content_type_header); - }; + if self.disable_ssz { + headers.insert( + ACCEPT, + HeaderValue::from_str(JSON_CONTENT_TYPE_HEADER) + .map_err(|e| Error::InvalidHeaders(format!("{}", e)))?, + ); + } else { + // Indicate preference for ssz response in the accept header + headers.insert( + ACCEPT, + HeaderValue::from_str(PREFERENCE_ACCEPT_VALUE) + .map_err(|e| Error::InvalidHeaders(format!("{}", e)))?, + ); + } let resp = self .get_with_header(path, self.timeouts.get_header, headers) @@ -395,3 +436,18 @@ impl BuilderHttpClient { .await } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_headers_no_panic() { + for fork in ForkName::list_all() { + assert!(HeaderValue::from_str(&fork.to_string()).is_ok()); + } + assert!(HeaderValue::from_str(PREFERENCE_ACCEPT_VALUE).is_ok()); + assert!(HeaderValue::from_str(JSON_ACCEPT_VALUE).is_ok()); + assert!(HeaderValue::from_str(JSON_CONTENT_TYPE_HEADER).is_ok()); + } +} diff --git 
a/beacon_node/execution_layer/src/lib.rs b/beacon_node/execution_layer/src/lib.rs index 6e5e4fca01e..4fd7188c206 100644 --- a/beacon_node/execution_layer/src/lib.rs +++ b/beacon_node/execution_layer/src/lib.rs @@ -441,6 +441,8 @@ pub struct Config { pub builder_header_timeout: Option, /// User agent to send with requests to the builder API. pub builder_user_agent: Option, + /// Disable ssz requests on builder. Only use json. + pub disable_builder_ssz_requests: bool, /// JWT secret for the above endpoint running the engine api. pub secret_file: Option, /// The default fee recipient to use on the beacon node if none if provided from @@ -470,6 +472,7 @@ impl ExecutionLayer { builder_url, builder_user_agent, builder_header_timeout, + disable_builder_ssz_requests, secret_file, suggested_fee_recipient, jwt_id, @@ -539,7 +542,12 @@ impl ExecutionLayer { }; if let Some(builder_url) = builder_url { - el.set_builder_url(builder_url, builder_user_agent, builder_header_timeout)?; + el.set_builder_url( + builder_url, + builder_user_agent, + builder_header_timeout, + disable_builder_ssz_requests, + )?; } Ok(el) @@ -562,11 +570,13 @@ impl ExecutionLayer { builder_url: SensitiveUrl, builder_user_agent: Option, builder_header_timeout: Option, + disable_ssz: bool, ) -> Result<(), Error> { let builder_client = BuilderHttpClient::new( builder_url.clone(), builder_user_agent, builder_header_timeout, + disable_ssz, ) .map_err(Error::Builder)?; info!( @@ -574,6 +584,7 @@ impl ExecutionLayer { "Using external block builder"; "builder_url" => ?builder_url, "local_user_agent" => builder_client.get_user_agent(), + "ssz_disabled" => disable_ssz ); self.inner.builder.swap(Some(Arc::new(builder_client))); Ok(()) @@ -1901,7 +1912,14 @@ impl ExecutionLayer { if let Some(builder) = self.builder() { let (payload_result, duration) = timed_future(metrics::POST_BLINDED_PAYLOAD_BUILDER, async { - if builder.is_ssz_enabled() { + let ssz_enabled = builder.is_ssz_available(); + debug!( + self.log(), + "Calling submit_blinded_block on builder"; + "block_root" => ?block_root, + "ssz" => ssz_enabled + ); + if ssz_enabled { builder .post_builder_blinded_blocks_ssz(block) .await diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs index 4c2daecdd34..2c8b271bd25 100644 --- a/beacon_node/src/cli.rs +++ b/beacon_node/src/cli.rs @@ -1460,6 +1460,15 @@ pub fn cli_app() -> Command { .action(ArgAction::Set) .display_order(0) ) + .arg( + Arg::new("builder-disable-ssz") + .long("builder-disable-ssz") + .value_name("BOOLEAN") + .help("Disables sending requests using SSZ over the builder API.") + .requires("builder") + .action(ArgAction::SetTrue) + .display_order(0) + ) .arg( Arg::new("reset-payload-statuses") .long("reset-payload-statuses") diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs index 24d569bea22..84320762d6c 100644 --- a/beacon_node/src/config.rs +++ b/beacon_node/src/config.rs @@ -346,6 +346,8 @@ pub fn get_config( el_config.builder_header_timeout = clap_utils::parse_optional(cli_args, "builder-header-timeout")? .map(Duration::from_millis); + + el_config.disable_builder_ssz_requests = cli_args.get_flag("builder-disable-ssz"); } // Set config values from parse values. diff --git a/book/src/help_bn.md b/book/src/help_bn.md index cbcb1ec5a3d..79c8d8ead85 100644 --- a/book/src/help_bn.md +++ b/book/src/help_bn.md @@ -28,6 +28,8 @@ Options: network. Multiaddr is also supported. --builder The URL of a service compatible with the MEV-boost API. 
+ --builder-disable-ssz + Disables sending requests using SSZ over the builder API. --builder-fallback-epochs-since-finalization If this node is proposing a block and the chain has not finalized within this number of epochs, it will NOT query any connected diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs index 03314930b9b..da10c2c4bd9 100644 --- a/lighthouse/tests/beacon_node.rs +++ b/lighthouse/tests/beacon_node.rs @@ -720,6 +720,40 @@ fn builder_user_agent() { ); } +#[test] +fn test_builder_disable_ssz_flag() { + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + None, + None, + |config| { + assert!( + !config + .execution_layer + .as_ref() + .unwrap() + .disable_builder_ssz_requests, + ); + }, + ); + run_payload_builder_flag_test_with_config( + "builder", + "http://meow.cats", + Some("builder-disable-ssz"), + None, + |config| { + assert!( + config + .execution_layer + .as_ref() + .unwrap() + .disable_builder_ssz_requests, + ); + }, + ); +} + fn run_jwt_optional_flags_test(jwt_flag: &str, jwt_id_flag: &str, jwt_version_flag: &str) { use sensitive_url::SensitiveUrl; From b4be5141823f9e828daf185c83e59c4c08e92ec1 Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 24 Feb 2025 00:39:15 -0300 Subject: [PATCH 04/10] Add spamoor_blob in network_params.yaml (#7012) Have blobs by default in deneb runs of kurtosis --- scripts/local_testnet/network_params.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/local_testnet/network_params.yaml b/scripts/local_testnet/network_params.yaml index b53d88e52c5..87ffeb8d224 100644 --- a/scripts/local_testnet/network_params.yaml +++ b/scripts/local_testnet/network_params.yaml @@ -14,4 +14,5 @@ global_log_level: debug snooper_enabled: false additional_services: - dora + - spamoor_blob - prometheus_grafana From 01df433dfd026a7de25a5b1275724a7e5a554a1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Oliveira?= Date: Mon, 24 Feb 2025 03:39:18 +0000 Subject: [PATCH 05/10] update codeowners, to be more specific (#7021) I keep being notified for PR's like https://github.com/sigp/lighthouse/pull/7009 where it doesn't touch the specified directories in the `CODEOWNERS` file. After reading the [docs](https://docs.github.com/en/repositories/managing-your-repositorys-settings-and-features/customizing-your-repository/about-code-owners) not having a forward slash in beginning of the path means: > In this example, @octocat owns any file in an apps directory > anywhere in your repository. whereas with the slashes: > In this example, @doctocat owns any file in the `/docs` > directory in the root of your repository and any of its > subdirectories. 
this update makes it more rigid for the files in the jxs ownership --- .github/CODEOWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index f9478d13691..a8919337a93 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,2 @@ -beacon_node/network/ @jxs -beacon_node/lighthouse_network/ @jxs +/beacon_node/network/ @jxs +/beacon_node/lighthouse_network/ @jxs From 60964fc7b5304152c784db706e00354db0961292 Mon Sep 17 00:00:00 2001 From: Daniel Knopik <107140945+dknopik@users.noreply.github.com> Date: Mon, 24 Feb 2025 04:39:20 +0100 Subject: [PATCH 06/10] Expose blst internals (#6829) --- crypto/bls/src/generic_secret_key.rs | 22 +++++++++++++++ crypto/bls/src/generic_signature.rs | 41 ++++++++++++++++++++++++++-- crypto/bls/src/impls/blst.rs | 10 ++++++- crypto/bls/src/impls/fake_crypto.rs | 12 +++++++- crypto/bls/src/lib.rs | 5 +++- crypto/bls/tests/tests.rs | 17 +++++++++++- 6 files changed, 101 insertions(+), 6 deletions(-) diff --git a/crypto/bls/src/generic_secret_key.rs b/crypto/bls/src/generic_secret_key.rs index a0a43311107..62bfc1467db 100644 --- a/crypto/bls/src/generic_secret_key.rs +++ b/crypto/bls/src/generic_secret_key.rs @@ -61,6 +61,11 @@ where GenericPublicKey::from_point(self.point.public_key()) } + /// Returns a reference to the underlying BLS point. + pub fn point(&self) -> &Sec { + &self.point + } + /// Serialize `self` as compressed bytes. /// /// ## Note @@ -89,3 +94,20 @@ where } } } + +impl GenericSecretKey +where + Sig: TSignature, + Pub: TPublicKey, + Sec: TSecretKey + Clone, +{ + /// Instantiates `Self` from a `point`. + /// Takes a reference, as moves might accidentally leave behind key material + pub fn from_point(point: &Sec) -> Self { + Self { + point: point.clone(), + _phantom_signature: PhantomData, + _phantom_public_key: PhantomData, + } + } +} diff --git a/crypto/bls/src/generic_signature.rs b/crypto/bls/src/generic_signature.rs index 05e0a222bd5..0b375d3edd5 100644 --- a/crypto/bls/src/generic_signature.rs +++ b/crypto/bls/src/generic_signature.rs @@ -14,6 +14,9 @@ use tree_hash::TreeHash; /// The byte-length of a BLS signature when serialized in compressed form. pub const SIGNATURE_BYTES_LEN: usize = 96; +/// The byte-length of a BLS signature when serialized in uncompressed form. +pub const SIGNATURE_UNCOMPRESSED_BYTES_LEN: usize = 192; + /// Represents the signature at infinity. pub const INFINITY_SIGNATURE: [u8; SIGNATURE_BYTES_LEN] = [ 0xc0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, @@ -22,6 +25,16 @@ pub const INFINITY_SIGNATURE: [u8; SIGNATURE_BYTES_LEN] = [ 0, ]; +pub const INFINITY_SIGNATURE_UNCOMPRESSED: [u8; SIGNATURE_UNCOMPRESSED_BYTES_LEN] = [ + 0x40, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, +]; + /// The compressed bytes used to represent `GenericSignature::empty()`. 
pub const NONE_SIGNATURE: [u8; SIGNATURE_BYTES_LEN] = [0; SIGNATURE_BYTES_LEN]; @@ -31,9 +44,15 @@ pub trait TSignature<GenericPublicKey>: Sized + Clone { /// Serialize `self` as compressed bytes. fn serialize(&self) -> [u8; SIGNATURE_BYTES_LEN]; + /// Serialize `self` as uncompressed bytes. + fn serialize_uncompressed(&self) -> [u8; SIGNATURE_UNCOMPRESSED_BYTES_LEN]; + /// Deserialize `self` from compressed bytes. fn deserialize(bytes: &[u8]) -> Result<Self, Error>; + /// Deserialize `self` from uncompressed bytes. + fn deserialize_uncompressed(bytes: &[u8]) -> Result<Self, Error>; + /// Returns `true` if `self` is a signature across `msg` by `pubkey`. fn verify(&self, pubkey: &GenericPublicKey, msg: Hash256) -> bool; } @@ -93,12 +112,12 @@ where } /// Returns a reference to the underlying BLS point. - pub(crate) fn point(&self) -> Option<&Sig> { + pub fn point(&self) -> Option<&Sig> { self.point.as_ref() } /// Instantiates `Self` from a `point`. - pub(crate) fn from_point(point: Sig, is_infinity: bool) -> Self { + pub fn from_point(point: Sig, is_infinity: bool) -> Self { Self { point: Some(point), is_infinity, @@ -115,6 +134,13 @@ where } } + /// Serialize `self` as uncompressed bytes. + pub fn serialize_uncompressed(&self) -> Option<[u8; SIGNATURE_UNCOMPRESSED_BYTES_LEN]> { + self.point + .as_ref() + .map(|point| point.serialize_uncompressed()) + } + /// Deserialize `self` from compressed bytes. pub fn deserialize(bytes: &[u8]) -> Result<Self, Error> { let point = if bytes == &NONE_SIGNATURE[..] { @@ -129,6 +155,17 @@ where _phantom: PhantomData, }) } + + /// Deserialize `self` from uncompressed bytes. + pub fn deserialize_uncompressed(bytes: &[u8]) -> Result<Self, Error> { + // The "none signature" is a beacon chain concept. As we never directly deal with + // uncompressed signatures on the beacon chain, it does not apply here.
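+ // Mark `is_infinity` by comparing the input against the uncompressed infinity encoding.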
+ Ok(Self { + point: Some(Sig::deserialize_uncompressed(bytes)?), + is_infinity: bytes == &INFINITY_SIGNATURE_UNCOMPRESSED[..], + _phantom: PhantomData, + }) + } } impl GenericSignature diff --git a/crypto/bls/src/impls/blst.rs b/crypto/bls/src/impls/blst.rs index baa704e05a9..6ca0fe09b2d 100644 --- a/crypto/bls/src/impls/blst.rs +++ b/crypto/bls/src/impls/blst.rs @@ -5,7 +5,7 @@ use crate::{ GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, }, generic_secret_key::TSecretKey, - generic_signature::{TSignature, SIGNATURE_BYTES_LEN}, + generic_signature::{TSignature, SIGNATURE_BYTES_LEN, SIGNATURE_UNCOMPRESSED_BYTES_LEN}, BlstError, Error, Hash256, ZeroizeHash, INFINITY_SIGNATURE, }; pub use blst::min_pk as blst_core; @@ -189,10 +189,18 @@ impl TSignature for blst_core::Signature { self.to_bytes() } + fn serialize_uncompressed(&self) -> [u8; SIGNATURE_UNCOMPRESSED_BYTES_LEN] { + self.serialize() + } + fn deserialize(bytes: &[u8]) -> Result { Self::from_bytes(bytes).map_err(Into::into) } + fn deserialize_uncompressed(bytes: &[u8]) -> Result { + Self::deserialize(bytes).map_err(Into::into) + } + fn verify(&self, pubkey: &blst_core::PublicKey, msg: Hash256) -> bool { // Public keys have already been checked for subgroup and infinity // Check Signature inside function for subgroup diff --git a/crypto/bls/src/impls/fake_crypto.rs b/crypto/bls/src/impls/fake_crypto.rs index a09fb347e6b..7273697597b 100644 --- a/crypto/bls/src/impls/fake_crypto.rs +++ b/crypto/bls/src/impls/fake_crypto.rs @@ -5,7 +5,7 @@ use crate::{ GenericPublicKey, TPublicKey, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, }, generic_secret_key::{TSecretKey, SECRET_KEY_BYTES_LEN}, - generic_signature::{TSignature, SIGNATURE_BYTES_LEN}, + generic_signature::{TSignature, SIGNATURE_BYTES_LEN, SIGNATURE_UNCOMPRESSED_BYTES_LEN}, Error, Hash256, ZeroizeHash, INFINITY_PUBLIC_KEY, INFINITY_SIGNATURE, }; @@ -106,12 +106,22 @@ impl TSignature for Signature { self.0 } + fn serialize_uncompressed(&self) -> [u8; SIGNATURE_UNCOMPRESSED_BYTES_LEN] { + let mut ret = [0; SIGNATURE_UNCOMPRESSED_BYTES_LEN]; + ret[0..SIGNATURE_BYTES_LEN].copy_from_slice(&self.0); + ret + } + fn deserialize(bytes: &[u8]) -> Result { let mut signature = Self::infinity(); signature.0[..].copy_from_slice(&bytes[0..SIGNATURE_BYTES_LEN]); Ok(signature) } + fn deserialize_uncompressed(bytes: &[u8]) -> Result { + Self::deserialize(bytes) + } + fn verify(&self, _pubkey: &PublicKey, _msg: Hash256) -> bool { true } diff --git a/crypto/bls/src/lib.rs b/crypto/bls/src/lib.rs index 6ea85548c0d..13b6dc2f2c7 100644 --- a/crypto/bls/src/lib.rs +++ b/crypto/bls/src/lib.rs @@ -37,7 +37,10 @@ pub use generic_public_key::{ INFINITY_PUBLIC_KEY, PUBLIC_KEY_BYTES_LEN, PUBLIC_KEY_UNCOMPRESSED_BYTES_LEN, }; pub use generic_secret_key::SECRET_KEY_BYTES_LEN; -pub use generic_signature::{INFINITY_SIGNATURE, SIGNATURE_BYTES_LEN}; +pub use generic_signature::{ + INFINITY_SIGNATURE, INFINITY_SIGNATURE_UNCOMPRESSED, SIGNATURE_BYTES_LEN, + SIGNATURE_UNCOMPRESSED_BYTES_LEN, +}; pub use get_withdrawal_credentials::get_withdrawal_credentials; pub use zeroize_hash::ZeroizeHash; diff --git a/crypto/bls/tests/tests.rs b/crypto/bls/tests/tests.rs index 26215771b5f..611dabbd648 100644 --- a/crypto/bls/tests/tests.rs +++ b/crypto/bls/tests/tests.rs @@ -1,4 +1,7 @@ -use bls::{FixedBytesExtended, Hash256, INFINITY_SIGNATURE, SECRET_KEY_BYTES_LEN}; +use bls::{ + FixedBytesExtended, Hash256, INFINITY_SIGNATURE, INFINITY_SIGNATURE_UNCOMPRESSED, + 
SECRET_KEY_BYTES_LEN, +}; use ssz::{Decode, Encode}; use std::borrow::Cow; use std::fmt::Debug; @@ -37,6 +40,18 @@ macro_rules! test_suite { assert!(AggregateSignature::infinity().is_infinity()); } + #[test] + fn infinity_sig_serializations_match() { + let sig = Signature::deserialize(&INFINITY_SIGNATURE).unwrap(); + assert_eq!( + sig.serialize_uncompressed().unwrap(), + INFINITY_SIGNATURE_UNCOMPRESSED + ); + let sig = + Signature::deserialize_uncompressed(&INFINITY_SIGNATURE_UNCOMPRESSED).unwrap(); + assert_eq!(sig.serialize(), INFINITY_SIGNATURE); + } + #[test] fn ssz_round_trip_multiple_types() { let mut agg_sig = AggregateSignature::infinity(); From 3fab6a2c0ba702c20c38d3083a3c533ea647dcac Mon Sep 17 00:00:00 2001 From: Lion - dapplion <35266934+dapplion@users.noreply.github.com> Date: Mon, 24 Feb 2025 01:47:09 -0300 Subject: [PATCH 07/10] Block availability data enum (#6866) PeerDAS has undergone multiple refactors, and blending it with the get_blobs optimization has generated technical debt. A function signature like this https://github.com/sigp/lighthouse/blob/f008b84079bbb6eb86de22bb3421dfc8263a5650/beacon_node/beacon_chain/src/beacon_chain.rs#L7171-L7178 allows at least the following combinations of states: - blobs: Some / None - data_columns: Some / None - data_column_recv: Some / None - Block has data? Yes / No - Block post-PeerDAS? Yes / No In reality, we don't have that many possible states, only: - `NoData`: pre-Deneb, pre-PeerDAS with 0 blobs or post-PeerDAS with 0 blobs - `Blobs(BlobSidecarList)`: post-Deneb pre-PeerDAS with > 0 blobs - `DataColumns(DataColumnSidecarList)`: post-PeerDAS with > 0 blobs - `DataColumnsRecv(oneshot::Receiver<DataColumnSidecarList<E>>)`: post-PeerDAS with > 0 blobs, but we obtained the columns via reconstruction ^ these are the variants of the new `AvailableBlockData` enum. So we go from 2^5 possible states to 4 well-defined ones. Downstream code benefits nicely from this clarity and I think it makes the whole feature much more maintainable. Currently `is_available` returns a bool, and then we construct the available block in `make_available`. In a way the availability condition is duplicated in both functions. Instead, this PR constructs `AvailableBlockData` in `is_available` so the availability conditions are written only once: ```rust if let Some(block_data) = is_available(..)
{ let available_block = make_available(block_data); } ``` --- beacon_node/beacon_chain/src/beacon_chain.rs | 110 +++-- .../beacon_chain/src/block_verification.rs | 1 - .../src/block_verification_types.rs | 16 +- .../src/data_availability_checker.rs | 153 ++++--- .../src/data_availability_checker/error.rs | 4 +- .../overflow_lru_cache.rs | 417 ++++++++++-------- .../state_lru_cache.rs | 1 - .../beacon_chain/src/early_attester_cache.rs | 17 +- .../beacon_chain/src/historical_blocks.rs | 38 +- .../tests/attestation_production.rs | 4 +- beacon_node/beacon_chain/tests/store_tests.rs | 20 +- 11 files changed, 426 insertions(+), 355 deletions(-) diff --git a/beacon_node/beacon_chain/src/beacon_chain.rs b/beacon_node/beacon_chain/src/beacon_chain.rs index b89dbe3dcae..ad31e085caf 100644 --- a/beacon_node/beacon_chain/src/beacon_chain.rs +++ b/beacon_node/beacon_chain/src/beacon_chain.rs @@ -21,8 +21,8 @@ use crate::block_verification_types::{ pub use crate::canonical_head::CanonicalHead; use crate::chain_config::ChainConfig; use crate::data_availability_checker::{ - Availability, AvailabilityCheckError, AvailableBlock, DataAvailabilityChecker, - DataColumnReconstructionResult, + Availability, AvailabilityCheckError, AvailableBlock, AvailableBlockData, + DataAvailabilityChecker, DataColumnReconstructionResult, }; use crate::data_column_verification::{GossipDataColumnError, GossipVerifiedDataColumn}; use crate::early_attester_cache::EarlyAttesterCache; @@ -3169,7 +3169,14 @@ impl BeaconChain { return Err(BlockError::DuplicateFullyImported(block_root)); } - self.emit_sse_blob_sidecar_events(&block_root, blobs.iter().flatten().map(Arc::as_ref)); + // process_engine_blobs is called for both pre and post PeerDAS. However, post PeerDAS + // consumers don't expect the blobs event to fire erratically. + if !self + .spec + .is_peer_das_enabled_for_epoch(slot.epoch(T::EthSpec::slots_per_epoch())) + { + self.emit_sse_blob_sidecar_events(&block_root, blobs.iter().flatten().map(Arc::as_ref)); + } let r = self .check_engine_blob_availability_and_import(slot, block_root, blobs, data_column_recv) @@ -3640,9 +3647,12 @@ impl BeaconChain { data_column_recv: Option>>, ) -> Result { self.check_blobs_for_slashability(block_root, &blobs)?; - let availability = - self.data_availability_checker - .put_engine_blobs(block_root, blobs, data_column_recv)?; + let availability = self.data_availability_checker.put_engine_blobs( + block_root, + slot.epoch(T::EthSpec::slots_per_epoch()), + blobs, + data_column_recv, + )?; self.process_availability(slot, availability, || Ok(())) .await @@ -3727,7 +3737,6 @@ impl BeaconChain { parent_eth1_finalization_data, confirmed_state_roots, consensus_context, - data_column_recv, } = import_data; // Record the time at which this block's blobs became available. 
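The refactor's payoff for downstream code is that persistence and import logic match once on `AvailableBlockData` instead of auditing `Option` combinations. A minimal standalone sketch of the consumer pattern (simplified stand-in types and a hypothetical `describe` helper; not code from this diff):

```rust
use tokio::sync::oneshot;

// Simplified stand-ins for the sidecar list types used in the PR.
type BlobSidecarList = Vec<Vec<u8>>;
type DataColumnSidecarList = Vec<Vec<u8>>;

enum AvailableBlockData {
    NoData,
    Blobs(BlobSidecarList),
    DataColumns(DataColumnSidecarList),
    DataColumnsRecv(oneshot::Receiver<DataColumnSidecarList>),
}

// One exhaustive match covers every availability state.
fn describe(data: &AvailableBlockData) -> &'static str {
    match data {
        AvailableBlockData::NoData => "nothing to persist",
        AvailableBlockData::Blobs(_) => "persist blobs",
        AvailableBlockData::DataColumns(_) => "persist data columns",
        AvailableBlockData::DataColumnsRecv(_) => "wait for computed columns, then persist",
    }
}

fn main() {
    let (_tx, _rx) = oneshot::channel::<DataColumnSidecarList>();
    println!("{}", describe(&AvailableBlockData::NoData));
}
```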
@@ -3755,7 +3764,6 @@ impl BeaconChain { parent_block, parent_eth1_finalization_data, consensus_context, - data_column_recv, ) }, "payload_verification_handle", @@ -3794,7 +3802,6 @@ impl BeaconChain { parent_block: SignedBlindedBeaconBlock, parent_eth1_finalization_data: Eth1FinalizationData, mut consensus_context: ConsensusContext, - data_column_recv: Option>>, ) -> Result { // ----------------------------- BLOCK NOT YET ATTESTABLE ---------------------------------- // Everything in this initial section is on the hot path between processing the block and @@ -3892,7 +3899,7 @@ impl BeaconChain { if let Some(proto_block) = fork_choice.get_block(&block_root) { if let Err(e) = self.early_attester_cache.add_head_block( block_root, - signed_block.clone(), + &signed_block, proto_block, &state, &self.spec, @@ -3961,15 +3968,9 @@ impl BeaconChain { // If the write fails, revert fork choice to the version from disk, else we can // end up with blocks in fork choice that are missing from disk. // See https://github.com/sigp/lighthouse/issues/2028 - let (_, signed_block, blobs, data_columns) = signed_block.deconstruct(); + let (_, signed_block, block_data) = signed_block.deconstruct(); - match self.get_blobs_or_columns_store_op( - block_root, - signed_block.epoch(), - blobs, - data_columns, - data_column_recv, - ) { + match self.get_blobs_or_columns_store_op(block_root, block_data) { Ok(Some(blobs_or_columns_store_op)) => { ops.push(blobs_or_columns_store_op); } @@ -7218,29 +7219,34 @@ impl BeaconChain { } } - fn get_blobs_or_columns_store_op( + pub(crate) fn get_blobs_or_columns_store_op( &self, block_root: Hash256, - block_epoch: Epoch, - blobs: Option>, - data_columns: Option>, - data_column_recv: Option>>, + block_data: AvailableBlockData, ) -> Result>, String> { - if self.spec.is_peer_das_enabled_for_epoch(block_epoch) { - // TODO(das) we currently store all subnet sampled columns. Tracking issue to exclude non - // custody columns: https://github.com/sigp/lighthouse/issues/6465 - let custody_columns_count = self.data_availability_checker.get_sampling_column_count(); + // TODO(das) we currently store all subnet sampled columns. Tracking issue to exclude non + // custody columns: https://github.com/sigp/lighthouse/issues/6465 + let _custody_columns_count = self.data_availability_checker.get_sampling_column_count(); - let custody_columns_available = data_columns - .as_ref() - .as_ref() - .is_some_and(|columns| columns.len() == custody_columns_count); - - let data_columns_to_persist = if custody_columns_available { - // If the block was made available via custody columns received from gossip / rpc, use them - // since we already have them. - data_columns - } else if let Some(data_column_recv) = data_column_recv { + match block_data { + AvailableBlockData::NoData => Ok(None), + AvailableBlockData::Blobs(blobs) => { + debug!( + self.log, "Writing blobs to store"; + "block_root" => %block_root, + "count" => blobs.len(), + ); + Ok(Some(StoreOp::PutBlobs(block_root, blobs))) + } + AvailableBlockData::DataColumns(data_columns) => { + debug!( + self.log, "Writing data columns to store"; + "block_root" => %block_root, + "count" => data_columns.len(), + ); + Ok(Some(StoreOp::PutDataColumns(block_root, data_columns))) + } + AvailableBlockData::DataColumnsRecv(data_column_recv) => { // Blobs were available from the EL, in this case we wait for the data columns to be computed (blocking). 
let _column_recv_timer = metrics::start_timer(&metrics::BLOCK_PROCESSING_DATA_COLUMNS_WAIT); @@ -7250,34 +7256,18 @@ impl BeaconChain { let computed_data_columns = data_column_recv .blocking_recv() .map_err(|e| format!("Did not receive data columns from sender: {e:?}"))?; - Some(computed_data_columns) - } else { - // No blobs in the block. - None - }; - - if let Some(data_columns) = data_columns_to_persist { - if !data_columns.is_empty() { - debug!( - self.log, "Writing data_columns to store"; - "block_root" => %block_root, - "count" => data_columns.len(), - ); - return Ok(Some(StoreOp::PutDataColumns(block_root, data_columns))); - } - } - } else if let Some(blobs) = blobs { - if !blobs.is_empty() { debug!( - self.log, "Writing blobs to store"; + self.log, "Writing data columns to store"; "block_root" => %block_root, - "count" => blobs.len(), + "count" => computed_data_columns.len(), ); - return Ok(Some(StoreOp::PutBlobs(block_root, blobs))); + // TODO(das): Store only this node's custody columns + Ok(Some(StoreOp::PutDataColumns( + block_root, + computed_data_columns, + ))) } } - - Ok(None) } } diff --git a/beacon_node/beacon_chain/src/block_verification.rs b/beacon_node/beacon_chain/src/block_verification.rs index 12652763763..9a8def585fa 100644 --- a/beacon_node/beacon_chain/src/block_verification.rs +++ b/beacon_node/beacon_chain/src/block_verification.rs @@ -1707,7 +1707,6 @@ impl ExecutionPendingBlock { parent_eth1_finalization_data, confirmed_state_roots, consensus_context, - data_column_recv: None, }, payload_verification_handle, }) diff --git a/beacon_node/beacon_chain/src/block_verification_types.rs b/beacon_node/beacon_chain/src/block_verification_types.rs index 38d0fc708ca..07ffae77129 100644 --- a/beacon_node/beacon_chain/src/block_verification_types.rs +++ b/beacon_node/beacon_chain/src/block_verification_types.rs @@ -7,11 +7,10 @@ use derivative::Derivative; use state_processing::ConsensusContext; use std::fmt::{Debug, Formatter}; use std::sync::Arc; -use tokio::sync::oneshot; use types::blob_sidecar::BlobIdentifier; use types::{ - BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, ChainSpec, DataColumnSidecarList, - Epoch, EthSpec, Hash256, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, + BeaconBlockRef, BeaconState, BlindedPayload, BlobSidecarList, ChainSpec, Epoch, EthSpec, + Hash256, RuntimeVariableList, SignedBeaconBlock, SignedBeaconBlockHeader, Slot, }; /// A block that has been received over RPC. It has 2 internal variants: @@ -265,7 +264,6 @@ impl ExecutedBlock { /// A block that has completed all pre-deneb block processing checks including verification /// by an EL client **and** has all requisite blob data to be imported into fork choice. -#[derive(PartialEq)] pub struct AvailableExecutedBlock { pub block: AvailableBlock, pub import_data: BlockImportData, @@ -338,8 +336,7 @@ impl AvailabilityPendingExecutedBlock { } } -#[derive(Debug, Derivative)] -#[derivative(PartialEq)] +#[derive(Debug, PartialEq)] pub struct BlockImportData { pub block_root: Hash256, pub state: BeaconState, @@ -347,12 +344,6 @@ pub struct BlockImportData { pub parent_eth1_finalization_data: Eth1FinalizationData, pub confirmed_state_roots: Vec, pub consensus_context: ConsensusContext, - #[derivative(PartialEq = "ignore")] - /// An optional receiver for `DataColumnSidecarList`. - /// - /// This field is `Some` when data columns are being computed asynchronously. - /// The resulting `DataColumnSidecarList` will be sent through this receiver. 
- pub data_column_recv: Option>>, } impl BlockImportData { @@ -371,7 +362,6 @@ impl BlockImportData { }, confirmed_state_roots: vec![], consensus_context: ConsensusContext::new(Slot::new(0)), - data_column_recv: None, } } } diff --git a/beacon_node/beacon_chain/src/data_availability_checker.rs b/beacon_node/beacon_chain/src/data_availability_checker.rs index f10d59ca1a5..875645ee9f4 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker.rs @@ -91,7 +91,6 @@ pub enum DataColumnReconstructionResult { /// /// Indicates if the block is fully `Available` or if we need blobs or blocks /// to "complete" the requirements for an `AvailableBlock`. -#[derive(PartialEq)] pub enum Availability { MissingComponents(Hash256), Available(Box>), @@ -219,7 +218,7 @@ impl DataAvailabilityChecker { .map_err(AvailabilityCheckError::InvalidBlobs)?; self.availability_cache - .put_kzg_verified_blobs(block_root, verified_blobs, None, &self.log) + .put_kzg_verified_blobs(block_root, verified_blobs, &self.log) } /// Put a list of custody columns received via RPC into the availability cache. This performs KZG @@ -253,23 +252,29 @@ impl DataAvailabilityChecker { pub fn put_engine_blobs( &self, block_root: Hash256, + block_epoch: Epoch, blobs: FixedBlobSidecarList, - data_column_recv: Option>>, + data_columns_recv: Option>>, ) -> Result, AvailabilityCheckError> { - let seen_timestamp = self - .slot_clock - .now_duration() - .ok_or(AvailabilityCheckError::SlotClockError)?; - - let verified_blobs = - KzgVerifiedBlobList::from_verified(blobs.iter().flatten().cloned(), seen_timestamp); - - self.availability_cache.put_kzg_verified_blobs( - block_root, - verified_blobs, - data_column_recv, - &self.log, - ) + // `data_columns_recv` is always Some if block_root is post-PeerDAS + if let Some(data_columns_recv) = data_columns_recv { + self.availability_cache.put_computed_data_columns_recv( + block_root, + block_epoch, + data_columns_recv, + &self.log, + ) + } else { + let seen_timestamp = self + .slot_clock + .now_duration() + .ok_or(AvailabilityCheckError::SlotClockError)?; + self.availability_cache.put_kzg_verified_blobs( + block_root, + KzgVerifiedBlobList::from_verified(blobs.iter().flatten().cloned(), seen_timestamp), + &self.log, + ) + } } /// Check if we've cached other blobs for this block. 
If it completes a set and we also @@ -284,7 +289,6 @@ impl DataAvailabilityChecker { self.availability_cache.put_kzg_verified_blobs( gossip_blob.block_root(), vec![gossip_blob.into_inner()], - None, &self.log, ) } @@ -338,15 +342,14 @@ impl DataAvailabilityChecker { ) -> Result, AvailabilityCheckError> { let (block_root, block, blobs, data_columns) = block.deconstruct(); if self.blobs_required_for_block(&block) { - return if let Some(blob_list) = blobs.as_ref() { + return if let Some(blob_list) = blobs { verify_kzg_for_blob_list(blob_list.iter(), &self.kzg) .map_err(AvailabilityCheckError::InvalidBlobs)?; Ok(MaybeAvailableBlock::Available(AvailableBlock { block_root, block, - blobs, + blob_data: AvailableBlockData::Blobs(blob_list), blobs_available_timestamp: None, - data_columns: None, spec: self.spec.clone(), })) } else { @@ -365,14 +368,13 @@ impl DataAvailabilityChecker { Ok(MaybeAvailableBlock::Available(AvailableBlock { block_root, block, - blobs: None, - blobs_available_timestamp: None, - data_columns: Some( + blob_data: AvailableBlockData::DataColumns( data_column_list .into_iter() .map(|d| d.clone_arc()) .collect(), ), + blobs_available_timestamp: None, spec: self.spec.clone(), })) } else { @@ -383,9 +385,8 @@ impl DataAvailabilityChecker { Ok(MaybeAvailableBlock::Available(AvailableBlock { block_root, block, - blobs: None, + blob_data: AvailableBlockData::NoData, blobs_available_timestamp: None, - data_columns: None, spec: self.spec.clone(), })) } @@ -437,27 +438,25 @@ impl DataAvailabilityChecker { let (block_root, block, blobs, data_columns) = block.deconstruct(); let maybe_available_block = if self.blobs_required_for_block(&block) { - if blobs.is_some() { + if let Some(blobs) = blobs { MaybeAvailableBlock::Available(AvailableBlock { block_root, block, - blobs, + blob_data: AvailableBlockData::Blobs(blobs), blobs_available_timestamp: None, - data_columns: None, spec: self.spec.clone(), }) } else { MaybeAvailableBlock::AvailabilityPending { block_root, block } } } else if self.data_columns_required_for_block(&block) { - if data_columns.is_some() { + if let Some(data_columns) = data_columns { MaybeAvailableBlock::Available(AvailableBlock { block_root, block, - blobs: None, - data_columns: data_columns.map(|data_columns| { - data_columns.into_iter().map(|d| d.into_inner()).collect() - }), + blob_data: AvailableBlockData::DataColumns( + data_columns.into_iter().map(|d| d.into_inner()).collect(), + ), blobs_available_timestamp: None, spec: self.spec.clone(), }) @@ -468,8 +467,7 @@ impl DataAvailabilityChecker { MaybeAvailableBlock::Available(AvailableBlock { block_root, block, - blobs: None, - data_columns: None, + blob_data: AvailableBlockData::NoData, blobs_available_timestamp: None, spec: self.spec.clone(), }) @@ -545,11 +543,11 @@ impl DataAvailabilityChecker { &self, block_root: &Hash256, ) -> Result, AvailabilityCheckError> { - let pending_components = match self + let verified_data_columns = match self .availability_cache .check_and_set_reconstruction_started(block_root) { - ReconstructColumnsDecision::Yes(pending_components) => pending_components, + ReconstructColumnsDecision::Yes(verified_data_columns) => verified_data_columns, ReconstructColumnsDecision::No(reason) => { return Ok(DataColumnReconstructionResult::NotStarted(reason)); } @@ -560,7 +558,7 @@ impl DataAvailabilityChecker { let all_data_columns = KzgVerifiedCustodyDataColumn::reconstruct_columns( &self.kzg, - &pending_components.verified_data_columns, + &verified_data_columns, &self.spec, ) .map_err(|e| { @@ 
-713,13 +711,25 @@ async fn availability_cache_maintenance_service( } } +#[derive(Debug)] +pub enum AvailableBlockData { + /// Block is pre-Deneb or has zero blobs + NoData, + /// Block is post-Deneb, pre-PeerDAS and has more than zero blobs + Blobs(BlobSidecarList), + /// Block is post-PeerDAS and has more than zero blobs + DataColumns(DataColumnSidecarList), + /// Block is post-PeerDAS, has more than zero blobs and we recomputed the columns from the EL's + /// mempool blobs + DataColumnsRecv(oneshot::Receiver>), +} + /// A fully available block that is ready to be imported into fork choice. -#[derive(Clone, Debug, PartialEq)] +#[derive(Debug)] pub struct AvailableBlock { block_root: Hash256, block: Arc>, - blobs: Option>, - data_columns: Option>, + blob_data: AvailableBlockData, /// Timestamp at which this block first became available (UNIX timestamp, time since 1970). blobs_available_timestamp: Option, pub spec: Arc, @@ -729,15 +739,13 @@ impl AvailableBlock { pub fn __new_for_testing( block_root: Hash256, block: Arc>, - blobs: Option>, - data_columns: Option>, + data: AvailableBlockData, spec: Arc, ) -> Self { Self { block_root, block, - blobs, - data_columns, + blob_data: data, blobs_available_timestamp: None, spec, } @@ -750,39 +758,56 @@ impl AvailableBlock { self.block.clone() } - pub fn blobs(&self) -> Option<&BlobSidecarList> { - self.blobs.as_ref() - } - pub fn blobs_available_timestamp(&self) -> Option { self.blobs_available_timestamp } - pub fn data_columns(&self) -> Option<&DataColumnSidecarList> { - self.data_columns.as_ref() + pub fn data(&self) -> &AvailableBlockData { + &self.blob_data + } + + pub fn has_blobs(&self) -> bool { + match self.blob_data { + AvailableBlockData::NoData => false, + AvailableBlockData::Blobs(..) => true, + AvailableBlockData::DataColumns(_) => false, + AvailableBlockData::DataColumnsRecv(_) => false, + } } #[allow(clippy::type_complexity)] - pub fn deconstruct( - self, - ) -> ( - Hash256, - Arc>, - Option>, - Option>, - ) { + pub fn deconstruct(self) -> (Hash256, Arc>, AvailableBlockData) { let AvailableBlock { block_root, block, - blobs, - data_columns, + blob_data, .. } = self; - (block_root, block, blobs, data_columns) + (block_root, block, blob_data) + } + + /// Only used for testing + pub fn __clone_without_recv(&self) -> Result { + Ok(Self { + block_root: self.block_root, + block: self.block.clone(), + blob_data: match &self.blob_data { + AvailableBlockData::NoData => AvailableBlockData::NoData, + AvailableBlockData::Blobs(blobs) => AvailableBlockData::Blobs(blobs.clone()), + AvailableBlockData::DataColumns(data_columns) => { + AvailableBlockData::DataColumns(data_columns.clone()) + } + AvailableBlockData::DataColumnsRecv(_) => { + return Err("Can't clone DataColumnsRecv".to_owned()) + } + }, + blobs_available_timestamp: self.blobs_available_timestamp, + spec: self.spec.clone(), + }) } } -#[derive(Debug, Clone)] +#[derive(Debug)] pub enum MaybeAvailableBlock { /// This variant is fully available. /// i.e. 
for pre-deneb blocks, it contains a (`SignedBeaconBlock`, `Blobs::None`) and for diff --git a/beacon_node/beacon_chain/src/data_availability_checker/error.rs b/beacon_node/beacon_chain/src/data_availability_checker/error.rs index 1ab85ab1056..4e75ed4945e 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/error.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/error.rs @@ -10,7 +10,7 @@ pub enum Error { blob_commitment: KzgCommitment, block_commitment: KzgCommitment, }, - Unexpected, + Unexpected(&'static str), SszTypes(ssz_types::Error), MissingBlobs, MissingCustodyColumns, @@ -40,7 +40,7 @@ impl Error { | Error::MissingCustodyColumns | Error::StoreError(_) | Error::DecodeError(_) - | Error::Unexpected + | Error::Unexpected(_) | Error::ParentStateMissing(_) | Error::BlockReplayError(_) | Error::RebuildingStateCaches(_) diff --git a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs index 034a6582ad2..78de5389299 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/overflow_lru_cache.rs @@ -1,4 +1,5 @@ use super::state_lru_cache::{DietAvailabilityPendingExecutedBlock, StateLRUCache}; +use super::AvailableBlockData; use crate::beacon_chain::BeaconStore; use crate::blob_verification::KzgVerifiedBlob; use crate::block_verification_types::{ @@ -10,6 +11,7 @@ use crate::BeaconChainTypes; use lru::LruCache; use parking_lot::RwLock; use slog::{debug, Logger}; +use std::cmp::Ordering; use std::num::NonZeroUsize; use std::sync::Arc; use tokio::sync::oneshot; @@ -39,19 +41,6 @@ pub struct PendingComponents { } impl PendingComponents { - /// Clones the `PendingComponent` without cloning `data_column_recv`, as `Receiver` is not cloneable. - /// This should only be used when the receiver is no longer needed. - pub fn clone_without_column_recv(&self) -> Self { - PendingComponents { - block_root: self.block_root, - verified_blobs: self.verified_blobs.clone(), - verified_data_columns: self.verified_data_columns.clone(), - executed_block: self.executed_block.clone(), - reconstruction_started: self.reconstruction_started, - data_column_recv: None, - } - } - /// Returns an immutable reference to the cached block. pub fn get_cached_block(&self) -> &Option> { &self.executed_block @@ -95,26 +84,6 @@ impl PendingComponents { .unwrap_or(false) } - /// Returns the number of blobs that are expected to be present. Returns `None` if we don't have a - /// block. - /// - /// This corresponds to the number of commitments that are present in a block. - pub fn block_kzg_commitments_count(&self) -> Option { - self.get_cached_block() - .as_ref() - .map(|b| b.get_commitments().len()) - } - - /// Returns the number of blobs that have been received and are stored in the cache. - pub fn num_received_blobs(&self) -> usize { - self.get_cached_blobs().iter().flatten().count() - } - - /// Returns the number of data columns that have been received and are stored in the cache. - pub fn num_received_data_columns(&self) -> usize { - self.verified_data_columns.len() - } - /// Returns the indices of cached custody columns pub fn get_cached_data_columns_indices(&self) -> Vec { self.verified_data_columns @@ -189,146 +158,135 @@ impl PendingComponents { self.merge_blobs(reinsert); } - /// Checks if the block and all of its expected blobs or custody columns (post-PeerDAS) are - /// available in the cache. 
- /// - /// Returns `true` if both the block exists and the number of received blobs / custody columns - /// matches the number of expected blobs / custody columns. - pub fn is_available(&self, custody_column_count: usize, log: &Logger) -> bool { - let block_kzg_commitments_count_opt = self.block_kzg_commitments_count(); - let expected_blobs_msg = block_kzg_commitments_count_opt - .as_ref() - .map(|num| num.to_string()) - .unwrap_or("unknown".to_string()); - - // No data columns when there are 0 blobs - let expected_columns_opt = block_kzg_commitments_count_opt.map(|blob_count| { - if blob_count > 0 { - custody_column_count - } else { - 0 - } - }); - let expected_columns_msg = expected_columns_opt - .as_ref() - .map(|num| num.to_string()) - .unwrap_or("unknown".to_string()); - - let num_received_blobs = self.num_received_blobs(); - let num_received_columns = self.num_received_data_columns(); - - debug!( - log, - "Component(s) added to data availability checker"; - "block_root" => ?self.block_root, - "received_blobs" => num_received_blobs, - "expected_blobs" => expected_blobs_msg, - "received_columns" => num_received_columns, - "expected_columns" => expected_columns_msg, - ); - - let all_blobs_received = block_kzg_commitments_count_opt - .is_some_and(|num_expected_blobs| num_expected_blobs == num_received_blobs); - - let all_columns_received = expected_columns_opt - .is_some_and(|num_expected_columns| num_expected_columns == num_received_columns); - - all_blobs_received || all_columns_received - } - - /// Returns an empty `PendingComponents` object with the given block root. - pub fn empty(block_root: Hash256, max_len: usize) -> Self { - Self { - block_root, - verified_blobs: RuntimeFixedVector::new(vec![None; max_len]), - verified_data_columns: vec![], - executed_block: None, - reconstruction_started: false, - data_column_recv: None, - } - } - - /// Verifies an `SignedBeaconBlock` against a set of KZG verified blobs. - /// This does not check whether a block *should* have blobs, these checks should have been - /// completed when producing the `AvailabilityPendingBlock`. + /// Returns Some if the block has received all its required data for import. The return value + /// must be persisted in the DB along with the block. /// /// WARNING: This function can potentially take a lot of time if the state needs to be /// reconstructed from disk. Ensure you are not holding any write locks while calling this. pub fn make_available( - self, + &mut self, + custody_column_count: usize, spec: &Arc, recover: R, - ) -> Result, AvailabilityCheckError> + ) -> Result>, AvailabilityCheckError> where R: FnOnce( DietAvailabilityPendingExecutedBlock, ) -> Result, AvailabilityCheckError>, { - let Self { - block_root, - verified_blobs, - verified_data_columns, - executed_block, - data_column_recv, - .. 
- } = self; - - let blobs_available_timestamp = verified_blobs - .iter() - .flatten() - .map(|blob| blob.seen_timestamp()) - .max(); - - let Some(diet_executed_block) = executed_block else { - return Err(AvailabilityCheckError::Unexpected); + let Some(block) = &self.executed_block else { + // Block not available yet + return Ok(None); }; - let is_peer_das_enabled = spec.is_peer_das_enabled_for_epoch(diet_executed_block.epoch()); - let (blobs, data_columns) = if is_peer_das_enabled { - let data_columns = verified_data_columns - .into_iter() - .map(|d| d.into_inner()) - .collect::>(); - (None, Some(data_columns)) + let num_expected_blobs = block.num_blobs_expected(); + + let blob_data = if num_expected_blobs == 0 { + Some(AvailableBlockData::NoData) + } else if spec.is_peer_das_enabled_for_epoch(block.epoch()) { + match self.verified_data_columns.len().cmp(&custody_column_count) { + Ordering::Greater => { + // Should never happen + return Err(AvailabilityCheckError::Unexpected("too many columns")); + } + Ordering::Equal => { + // Block is post-peerdas, and we got enough columns + let data_columns = self + .verified_data_columns + .iter() + .map(|d| d.clone().into_inner()) + .collect::>(); + Some(AvailableBlockData::DataColumns(data_columns)) + } + Ordering::Less => { + // The data_columns_recv is an infallible promise that we will receive all expected + // columns, so we consider the block available. + // We take the receiver as it can't be cloned, and make_available should never + // be called again once it returns `Some`. + self.data_column_recv + .take() + .map(AvailableBlockData::DataColumnsRecv) + } + } } else { - let num_blobs_expected = diet_executed_block.num_blobs_expected(); - let Some(verified_blobs) = verified_blobs - .into_iter() - .map(|b| b.map(|b| b.to_blob())) - .take(num_blobs_expected) - .collect::>>() - else { - return Err(AvailabilityCheckError::Unexpected); - }; - let max_len = spec.max_blobs_per_block(diet_executed_block.as_block().epoch()) as usize; - ( - Some(RuntimeVariableList::new(verified_blobs, max_len)?), - None, - ) + // Before PeerDAS, blobs + let num_received_blobs = self.verified_blobs.iter().flatten().count(); + match num_received_blobs.cmp(&num_expected_blobs) { + Ordering::Greater => { + // Should never happen + return Err(AvailabilityCheckError::Unexpected("too many blobs")); + } + Ordering::Equal => { + let max_blobs = spec.max_blobs_per_block(block.epoch()) as usize; + let blobs_vec = self + .verified_blobs + .iter() + .flatten() + .map(|blob| blob.clone().to_blob()) + .collect::>(); + let blobs = RuntimeVariableList::new(blobs_vec, max_blobs) + .map_err(|_| AvailabilityCheckError::Unexpected("over max_blobs"))?; + Some(AvailableBlockData::Blobs(blobs)) + } + Ordering::Less => { + // Not enough blobs received yet + None + } + } + }; + + // Block's data not available yet + let Some(blob_data) = blob_data else { + return Ok(None); + }; + + // Block is available, construct `AvailableExecutedBlock` + + let blobs_available_timestamp = match blob_data { + AvailableBlockData::NoData => None, + AvailableBlockData::Blobs(_) => self + .verified_blobs + .iter() + .flatten() + .map(|blob| blob.seen_timestamp()) + .max(), + // TODO(das): To be fixed with https://github.com/sigp/lighthouse/pull/6850 + AvailableBlockData::DataColumns(_) => None, + AvailableBlockData::DataColumnsRecv(_) => None, }; - let executed_block = recover(diet_executed_block)?; let AvailabilityPendingExecutedBlock { block, - mut import_data, + import_data, payload_verification_outcome, - } = 
executed_block; - - import_data.data_column_recv = data_column_recv; + } = recover(block.clone())?; let available_block = AvailableBlock { - block_root, + block_root: self.block_root, block, - blobs, - data_columns, + blob_data, blobs_available_timestamp, spec: spec.clone(), }; - Ok(Availability::Available(Box::new( - AvailableExecutedBlock::new(available_block, import_data, payload_verification_outcome), + Ok(Some(AvailableExecutedBlock::new( + available_block, + import_data, + payload_verification_outcome, ))) } + /// Returns an empty `PendingComponents` object with the given block root. + pub fn empty(block_root: Hash256, max_len: usize) -> Self { + Self { + block_root, + verified_blobs: RuntimeFixedVector::new(vec![None; max_len]), + verified_data_columns: vec![], + executed_block: None, + reconstruction_started: false, + data_column_recv: None, + } + } + /// Returns the epoch of the block if it is cached, otherwise returns the epoch of the first blob. pub fn epoch(&self) -> Option { self.executed_block @@ -354,6 +312,41 @@ impl PendingComponents { None }) } + + pub fn status_str( + &self, + block_epoch: Epoch, + sampling_column_count: usize, + spec: &ChainSpec, + ) -> String { + let block_count = if self.executed_block.is_some() { 1 } else { 0 }; + if spec.is_peer_das_enabled_for_epoch(block_epoch) { + let data_column_recv_count = if self.data_column_recv.is_some() { + 1 + } else { + 0 + }; + format!( + "block {} data_columns {}/{} data_columns_recv {}", + block_count, + self.verified_data_columns.len(), + sampling_column_count, + data_column_recv_count, + ) + } else { + let num_expected_blobs = if let Some(block) = self.get_cached_block() { + &block.num_blobs_expected().to_string() + } else { + "?" + }; + format!( + "block {} blobs {}/{}", + block_count, + self.verified_blobs.len(), + num_expected_blobs + ) + } + } } /// This is the main struct for this module. Outside methods should @@ -374,7 +367,7 @@ pub struct DataAvailabilityCheckerInner { // the current usage, as it's deconstructed immediately. #[allow(clippy::large_enum_variant)] pub(crate) enum ReconstructColumnsDecision { - Yes(PendingComponents), + Yes(Vec>), No(&'static str), } @@ -455,16 +448,10 @@ impl DataAvailabilityCheckerInner { } /// Puts the KZG verified blobs into the availability cache as pending components. - /// - /// The `data_column_recv` parameter is an optional `Receiver` for data columns that are - /// computed asynchronously. This method remains **used** after PeerDAS activation, because - /// blocks can be made available if the EL already has the blobs and returns them via the - /// `getBlobsV1` engine method. More details in [fetch_blobs.rs](https://github.com/sigp/lighthouse/blob/44f8add41ea2252769bb967864af95b3c13af8ca/beacon_node/beacon_chain/src/fetch_blobs.rs). pub fn put_kzg_verified_blobs>>( &self, block_root: Hash256, kzg_verified_blobs: I, - data_column_recv: Option>>, log: &Logger, ) -> Result, AvailabilityCheckError> { let mut kzg_verified_blobs = kzg_verified_blobs.into_iter().peekable(); @@ -474,7 +461,7 @@ impl DataAvailabilityCheckerInner { .map(|verified_blob| verified_blob.as_blob().epoch()) else { // Verified blobs list should be non-empty. - return Err(AvailabilityCheckError::Unexpected); + return Err(AvailabilityCheckError::Unexpected("empty blobs")); }; let mut fixed_blobs = @@ -499,21 +486,22 @@ impl DataAvailabilityCheckerInner { // Merge in the blobs. 
pending_components.merge_blobs(fixed_blobs); - if data_column_recv.is_some() { - // If `data_column_recv` is `Some`, it means we have all the blobs from engine, and have - // started computing data columns. We store the receiver in `PendingComponents` for - // later use when importing the block. - pending_components.data_column_recv = data_column_recv; - } + debug!(log, "Component added to data availability checker"; + "component" => "blobs", + "block_root" => ?block_root, + "status" => pending_components.status_str(epoch, self.sampling_column_count, &self.spec), + ); - if pending_components.is_available(self.sampling_column_count, log) { + if let Some(available_block) = + pending_components.make_available(self.sampling_column_count, &self.spec, |block| { + self.state_cache.recover_pending_executed_block(block) + })? + { // We keep the pending components in the availability cache during block import (#5845). // `data_column_recv` is returned as part of the available block and is no longer needed here. - write_lock.put(block_root, pending_components.clone_without_column_recv()); + write_lock.put(block_root, pending_components); drop(write_lock); - pending_components.make_available(&self.spec, |diet_block| { - self.state_cache.recover_pending_executed_block(diet_block) - }) + Ok(Availability::Available(Box::new(available_block))) } else { write_lock.put(block_root, pending_components); Ok(Availability::MissingComponents(block_root)) @@ -535,7 +523,7 @@ impl DataAvailabilityCheckerInner { .map(|verified_blob| verified_blob.as_data_column().epoch()) else { // Verified data_columns list should be non-empty. - return Err(AvailabilityCheckError::Unexpected); + return Err(AvailabilityCheckError::Unexpected("empty columns")); }; let mut write_lock = self.critical.write(); @@ -551,14 +539,72 @@ impl DataAvailabilityCheckerInner { // Merge in the data columns. pending_components.merge_data_columns(kzg_verified_data_columns)?; - if pending_components.is_available(self.sampling_column_count, log) { + debug!(log, "Component added to data availability checker"; + "component" => "data_columns", + "block_root" => ?block_root, + "status" => pending_components.status_str(epoch, self.sampling_column_count, &self.spec), + ); + + if let Some(available_block) = + pending_components.make_available(self.sampling_column_count, &self.spec, |block| { + self.state_cache.recover_pending_executed_block(block) + })? + { // We keep the pending components in the availability cache during block import (#5845). // `data_column_recv` is returned as part of the available block and is no longer needed here. - write_lock.put(block_root, pending_components.clone_without_column_recv()); + write_lock.put(block_root, pending_components); drop(write_lock); - pending_components.make_available(&self.spec, |diet_block| { - self.state_cache.recover_pending_executed_block(diet_block) - }) + Ok(Availability::Available(Box::new(available_block))) + } else { + write_lock.put(block_root, pending_components); + Ok(Availability::MissingComponents(block_root)) + } + } + + /// The `data_column_recv` parameter is a `Receiver` for data columns that are computed + /// asynchronously. This method is used if the EL already has the blobs and returns them via the + /// `getBlobsV1` engine method. More details in [fetch_blobs.rs](https://github.com/sigp/lighthouse/blob/44f8add41ea2252769bb967864af95b3c13af8ca/beacon_node/beacon_chain/src/fetch_blobs.rs). 
+ pub fn put_computed_data_columns_recv( + &self, + block_root: Hash256, + block_epoch: Epoch, + data_column_recv: oneshot::Receiver>, + log: &Logger, + ) -> Result, AvailabilityCheckError> { + let mut write_lock = self.critical.write(); + + // Grab existing entry or create a new entry. + let mut pending_components = write_lock + .pop_entry(&block_root) + .map(|(_, v)| v) + .unwrap_or_else(|| { + PendingComponents::empty( + block_root, + self.spec.max_blobs_per_block(block_epoch) as usize, + ) + }); + + // We have all the blobs from engine, and have started computing data columns. We store the + // receiver in `PendingComponents` for later use when importing the block. + // TODO(das): Error or log if we overwrite a prior receiver https://github.com/sigp/lighthouse/issues/6764 + pending_components.data_column_recv = Some(data_column_recv); + + debug!(log, "Component added to data availability checker"; + "component" => "data_columns_recv", + "block_root" => ?block_root, + "status" => pending_components.status_str(block_epoch, self.sampling_column_count, &self.spec), + ); + + if let Some(available_block) = + pending_components.make_available(self.sampling_column_count, &self.spec, |block| { + self.state_cache.recover_pending_executed_block(block) + })? + { + // We keep the pending components in the availability cache during block import (#5845). + // `data_column_recv` is returned as part of the available block and is no longer needed here. + write_lock.put(block_root, pending_components); + drop(write_lock); + Ok(Availability::Available(Box::new(available_block))) } else { write_lock.put(block_root, pending_components); Ok(Availability::MissingComponents(block_root)) @@ -603,7 +649,7 @@ impl DataAvailabilityCheckerInner { } pending_components.reconstruction_started = true; - ReconstructColumnsDecision::Yes(pending_components.clone_without_column_recv()) + ReconstructColumnsDecision::Yes(pending_components.verified_data_columns.clone()) } /// This could mean some invalid data columns made it through to the `DataAvailabilityChecker`. @@ -643,15 +689,23 @@ impl DataAvailabilityCheckerInner { // Merge in the block. pending_components.merge_block(diet_executed_block); + debug!(log, "Component added to data availability checker"; + "component" => "block", + "block_root" => ?block_root, + "status" => pending_components.status_str(epoch, self.sampling_column_count, &self.spec), + ); + // Check if we have all components and entire set is consistent. - if pending_components.is_available(self.sampling_column_count, log) { + if let Some(available_block) = + pending_components.make_available(self.sampling_column_count, &self.spec, |block| { + self.state_cache.recover_pending_executed_block(block) + })? + { // We keep the pending components in the availability cache during block import (#5845). // `data_column_recv` is returned as part of the available block and is no longer needed here. 
- write_lock.put(block_root, pending_components.clone_without_column_recv()); + write_lock.put(block_root, pending_components); drop(write_lock); - pending_components.make_available(&self.spec, |diet_block| { - self.state_cache.recover_pending_executed_block(diet_block) - }) + Ok(Availability::Available(Box::new(available_block))) } else { write_lock.put(block_root, pending_components); Ok(Availability::MissingComponents(block_root)) @@ -882,7 +936,6 @@ mod test { parent_eth1_finalization_data, confirmed_state_roots: vec![], consensus_context, - data_column_recv: None, }; let payload_verification_outcome = PayloadVerificationOutcome { @@ -989,7 +1042,7 @@ mod test { for (blob_index, gossip_blob) in blobs.into_iter().enumerate() { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, kzg_verified_blobs.clone(), None, harness.logger()) + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone(), harness.logger()) .expect("should put blob"); if blob_index == blobs_expected - 1 { assert!(matches!(availability, Availability::Available(_))); @@ -1017,11 +1070,10 @@ mod test { for gossip_blob in blobs { kzg_verified_blobs.push(gossip_blob.into_inner()); let availability = cache - .put_kzg_verified_blobs(root, kzg_verified_blobs.clone(), None, harness.logger()) + .put_kzg_verified_blobs(root, kzg_verified_blobs.clone(), harness.logger()) .expect("should put blob"); - assert_eq!( - availability, - Availability::MissingComponents(root), + assert!( + matches!(availability, Availability::MissingComponents(_)), "should be pending block" ); assert_eq!(cache.critical.read().len(), 1); @@ -1273,7 +1325,6 @@ mod pending_components_tests { }, confirmed_state_roots: vec![], consensus_context: ConsensusContext::new(Slot::new(0)), - data_column_recv: None, }, payload_verification_outcome: PayloadVerificationOutcome { payload_verification_status: PayloadVerificationStatus::Verified, diff --git a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs index 2a2a0431ccb..5b9b7c70233 100644 --- a/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs +++ b/beacon_node/beacon_chain/src/data_availability_checker/state_lru_cache.rs @@ -136,7 +136,6 @@ impl StateLRUCache { consensus_context: diet_executed_block .consensus_context .into_consensus_context(), - data_column_recv: None, }, payload_verification_outcome: diet_executed_block.payload_verification_outcome, }) diff --git a/beacon_node/beacon_chain/src/early_attester_cache.rs b/beacon_node/beacon_chain/src/early_attester_cache.rs index c94ea0e9414..a90911026cf 100644 --- a/beacon_node/beacon_chain/src/early_attester_cache.rs +++ b/beacon_node/beacon_chain/src/early_attester_cache.rs @@ -1,4 +1,4 @@ -use crate::data_availability_checker::AvailableBlock; +use crate::data_availability_checker::{AvailableBlock, AvailableBlockData}; use crate::{ attester_cache::{CommitteeLengths, Error}, metrics, @@ -52,7 +52,7 @@ impl EarlyAttesterCache { pub fn add_head_block( &self, beacon_block_root: Hash256, - block: AvailableBlock, + block: &AvailableBlock, proto_block: ProtoBlock, state: &BeaconState, spec: &ChainSpec, @@ -70,14 +70,23 @@ impl EarlyAttesterCache { }, }; - let (_, block, blobs, data_columns) = block.deconstruct(); + let (blobs, data_columns) = match block.data() { + AvailableBlockData::NoData => (None, None), + AvailableBlockData::Blobs(blobs) => (Some(blobs.clone()), None), + 
AvailableBlockData::DataColumns(data_columns) => (None, Some(data_columns.clone())), + // TODO(das): Once the columns are received, they will not be available in + // the early attester cache. If someone does a query to us via RPC we + // will get downscored. + AvailableBlockData::DataColumnsRecv(_) => (None, None), + }; + let item = CacheItem { epoch, committee_lengths, beacon_block_root, source, target, - block, + block: block.block_cloned(), blobs, data_columns, proto_block, diff --git a/beacon_node/beacon_chain/src/historical_blocks.rs b/beacon_node/beacon_chain/src/historical_blocks.rs index a48f32e7b40..a9caeb18bb1 100644 --- a/beacon_node/beacon_chain/src/historical_blocks.rs +++ b/beacon_node/beacon_chain/src/historical_blocks.rs @@ -1,4 +1,4 @@ -use crate::data_availability_checker::AvailableBlock; +use crate::data_availability_checker::{AvailableBlock, AvailableBlockData}; use crate::{metrics, BeaconChain, BeaconChainTypes}; use itertools::Itertools; use slog::debug; @@ -105,7 +105,7 @@ impl BeaconChain { let blob_batch_size = blocks_to_import .iter() - .filter(|available_block| available_block.blobs().is_some()) + .filter(|available_block| available_block.has_blobs()) .count() .saturating_mul(n_blob_ops_per_block); @@ -114,14 +114,13 @@ impl BeaconChain { let mut new_oldest_blob_slot = blob_info.oldest_blob_slot; let mut new_oldest_data_column_slot = data_column_info.oldest_data_column_slot; - let mut blob_batch = Vec::with_capacity(blob_batch_size); + let mut blob_batch = Vec::::with_capacity(blob_batch_size); let mut cold_batch = Vec::with_capacity(blocks_to_import.len()); let mut hot_batch = Vec::with_capacity(blocks_to_import.len()); let mut signed_blocks = Vec::with_capacity(blocks_to_import.len()); for available_block in blocks_to_import.into_iter().rev() { - let (block_root, block, maybe_blobs, maybe_data_columns) = - available_block.deconstruct(); + let (block_root, block, block_data) = available_block.deconstruct(); if block_root != expected_block_root { return Err(HistoricalBlockError::MismatchedBlockRoot { @@ -144,17 +143,26 @@ impl BeaconChain { ); } - // Store the blobs too - if let Some(blobs) = maybe_blobs { - new_oldest_blob_slot = Some(block.slot()); - self.store - .blobs_as_kv_store_ops(&block_root, blobs, &mut blob_batch); + match &block_data { + AvailableBlockData::NoData => {} + AvailableBlockData::Blobs(..) => { + new_oldest_blob_slot = Some(block.slot()); + } + AvailableBlockData::DataColumns(_) | AvailableBlockData::DataColumnsRecv(_) => { + new_oldest_data_column_slot = Some(block.slot()); + } } - // Store the data columns too - if let Some(data_columns) = maybe_data_columns { - new_oldest_data_column_slot = Some(block.slot()); - self.store - .data_columns_as_kv_store_ops(&block_root, data_columns, &mut blob_batch); + + // Store the blobs or data columns too + if let Some(op) = self + .get_blobs_or_columns_store_op(block_root, block_data) + .map_err(|e| { + HistoricalBlockError::StoreError(StoreError::DBError { + message: format!("get_blobs_or_columns_store_op error {e:?}"), + }) + })? + { + blob_batch.extend(self.store.convert_to_kv_batch(vec![op])?); } // Store block roots, including at all skip slots in the freezer DB. 
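One detail behind the test changes that follow: `tokio::sync::oneshot::Receiver` is not `Clone`, so `AvailableBlock` loses its `Clone` derive and tests must copy blocks through the fallible `__clone_without_recv` helper. A minimal sketch of the constraint (standalone example with an illustrative `HoldsReceiver` type; not code from this diff):

```rust
use tokio::sync::oneshot;

// Deriving `Clone` here fails: `oneshot::Receiver<T>` is not `Clone`,
// since exactly one receiver may consume the sent value.
// #[derive(Clone)] // error[E0277] if uncommented
struct HoldsReceiver {
    recv: oneshot::Receiver<Vec<u8>>,
}

fn main() {
    let (_tx, rx) = oneshot::channel::<Vec<u8>>();
    let holder = HoldsReceiver { recv: rx };
    // `holder.clone()` would not compile; a manual copy must either take
    // ownership of the receiver or refuse, as `__clone_without_recv` does.
    drop(holder);
}
```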
diff --git a/beacon_node/beacon_chain/tests/attestation_production.rs b/beacon_node/beacon_chain/tests/attestation_production.rs index 621475a3ece..d89a8530e1b 100644 --- a/beacon_node/beacon_chain/tests/attestation_production.rs +++ b/beacon_node/beacon_chain/tests/attestation_production.rs @@ -242,7 +242,7 @@ async fn produces_attestations() { .early_attester_cache .add_head_block( block_root, - available_block, + &available_block, proto_block, &state, &chain.spec, @@ -310,7 +310,7 @@ async fn early_attester_cache_old_request() { .early_attester_cache .add_head_block( head.beacon_block_root, - available_block, + &available_block, head_proto_block, &head.beacon_state, &harness.chain.spec, diff --git a/beacon_node/beacon_chain/tests/store_tests.rs b/beacon_node/beacon_chain/tests/store_tests.rs index 7a2df769700..997a2859b79 100644 --- a/beacon_node/beacon_chain/tests/store_tests.rs +++ b/beacon_node/beacon_chain/tests/store_tests.rs @@ -2517,18 +2517,13 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { // Corrupt the signature on the 1st block to ensure that the backfill processor is checking // signatures correctly. Regression test for https://github.com/sigp/lighthouse/pull/5120. - let mut batch_with_invalid_first_block = available_blocks.clone(); + let mut batch_with_invalid_first_block = + available_blocks.iter().map(clone_block).collect::>(); batch_with_invalid_first_block[0] = { - let (block_root, block, blobs, data_columns) = available_blocks[0].clone().deconstruct(); + let (block_root, block, data) = clone_block(&available_blocks[0]).deconstruct(); let mut corrupt_block = (*block).clone(); *corrupt_block.signature_mut() = Signature::empty(); - AvailableBlock::__new_for_testing( - block_root, - Arc::new(corrupt_block), - blobs, - data_columns, - Arc::new(spec), - ) + AvailableBlock::__new_for_testing(block_root, Arc::new(corrupt_block), data, Arc::new(spec)) }; // Importing the invalid batch should error. @@ -2540,8 +2535,9 @@ async fn weak_subjectivity_sync_test(slots: Vec, checkpoint_slot: Slot) { )); // Importing the batch with valid signatures should succeed. 
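+ // `AvailableBlock` is no longer `Clone` (it may hold a oneshot receiver), so rebuild + // the batch with the test-only `clone_block` helper.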
+ let available_blocks_dup = available_blocks.iter().map(clone_block).collect::>(); beacon_chain - .import_historical_block_batch(available_blocks.clone()) + .import_historical_block_batch(available_blocks_dup) .unwrap(); assert_eq!(beacon_chain.store.get_oldest_block_slot(), 0); @@ -3690,3 +3686,7 @@ fn get_blocks( .map(|checkpoint| checkpoint.beacon_block_root.into()) .collect() } + +fn clone_block(block: &AvailableBlock) -> AvailableBlock { + block.__clone_without_recv().unwrap() +} From 6e11bddd4bd0de87a93e7880a9818bd80c95b75b Mon Sep 17 00:00:00 2001 From: Krishang Shah <93703995+kamuik16@users.noreply.github.com> Date: Mon, 24 Feb 2025 11:33:17 +0530 Subject: [PATCH 08/10] feat: adds CLI flags to delay publishing for edge case testing on PeerDAS devnets (#6947) Closes #6919 --- beacon_node/beacon_chain/src/chain_config.rs | 6 +++++ beacon_node/http_api/src/publish_blocks.rs | 27 ++++++++++++++++++++ beacon_node/src/cli.rs | 25 ++++++++++++++++++ beacon_node/src/config.rs | 8 ++++++ lighthouse/tests/beacon_node.rs | 26 +++++++++++++++++++ 5 files changed, 92 insertions(+) diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs index fcdd57abbc8..d3852183b9f 100644 --- a/beacon_node/beacon_chain/src/chain_config.rs +++ b/beacon_node/beacon_chain/src/chain_config.rs @@ -94,6 +94,10 @@ pub struct ChainConfig { /// The delay in milliseconds applied by the node between sending each blob or data column batch. /// This doesn't apply if the node is the block proposer. pub blob_publication_batch_interval: Duration, + /// Artificial delay for block publishing. For PeerDAS testing only. + pub block_publishing_delay: Option, + /// Artificial delay for data column publishing. For PeerDAS testing only. + pub data_column_publishing_delay: Option, } impl Default for ChainConfig { @@ -129,6 +133,8 @@ impl Default for ChainConfig { enable_sampling: false, blob_publication_batches: 4, blob_publication_batch_interval: Duration::from_millis(300), + block_publishing_delay: None, + data_column_publishing_delay: None, } } } diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs index 60d4b2f16ed..072ae5dc03c 100644 --- a/beacon_node/http_api/src/publish_blocks.rs +++ b/beacon_node/http_api/src/publish_blocks.rs @@ -86,6 +86,8 @@ pub async fn publish_block>( network_globals: Arc>, ) -> Result { let seen_timestamp = timestamp_now(); + let block_publishing_delay_for_testing = chain.config.block_publishing_delay; + let data_column_publishing_delay_for_testing = chain.config.data_column_publishing_delay; let (unverified_block, unverified_blobs, is_locally_built_block) = match provenanced_block { ProvenancedBlock::Local(block, blobs, _) => (block, blobs, true), @@ -147,6 +149,14 @@ pub async fn publish_block>( let should_publish_block = gossip_verified_block_result.is_ok(); if BroadcastValidation::Gossip == validation_level && should_publish_block { + if let Some(block_publishing_delay) = block_publishing_delay_for_testing { + debug!( + log, + "Publishing block with artificial delay"; + "block_publishing_delay" => ?block_publishing_delay + ); + tokio::time::sleep(block_publishing_delay).await; + } publish_block_p2p( block.clone(), sender_clone.clone(), @@ -207,6 +217,23 @@ pub async fn publish_block>( } if gossip_verified_columns.iter().map(Option::is_some).count() > 0 { + if let Some(data_column_publishing_delay) = data_column_publishing_delay_for_testing { + // Subtract block publishing delay if it is also used. 
---
 beacon_node/beacon_chain/src/chain_config.rs |  6 +++++
 beacon_node/http_api/src/publish_blocks.rs   | 27 ++++++++++++++++++++
 beacon_node/src/cli.rs                       | 25 ++++++++++++++++++
 beacon_node/src/config.rs                    |  8 ++++++
 lighthouse/tests/beacon_node.rs              | 26 +++++++++++++++++++
 5 files changed, 92 insertions(+)

diff --git a/beacon_node/beacon_chain/src/chain_config.rs b/beacon_node/beacon_chain/src/chain_config.rs
index fcdd57abbc8..d3852183b9f 100644
--- a/beacon_node/beacon_chain/src/chain_config.rs
+++ b/beacon_node/beacon_chain/src/chain_config.rs
@@ -94,6 +94,10 @@ pub struct ChainConfig {
     /// The delay in milliseconds applied by the node between sending each blob or data column batch.
     /// This doesn't apply if the node is the block proposer.
     pub blob_publication_batch_interval: Duration,
+    /// Artificial delay for block publishing. For PeerDAS testing only.
+    pub block_publishing_delay: Option<Duration>,
+    /// Artificial delay for data column publishing. For PeerDAS testing only.
+    pub data_column_publishing_delay: Option<Duration>,
 }
 
 impl Default for ChainConfig {
@@ -129,6 +133,8 @@ impl Default for ChainConfig {
             enable_sampling: false,
             blob_publication_batches: 4,
             blob_publication_batch_interval: Duration::from_millis(300),
+            block_publishing_delay: None,
+            data_column_publishing_delay: None,
         }
     }
 }
diff --git a/beacon_node/http_api/src/publish_blocks.rs b/beacon_node/http_api/src/publish_blocks.rs
index 60d4b2f16ed..072ae5dc03c 100644
--- a/beacon_node/http_api/src/publish_blocks.rs
+++ b/beacon_node/http_api/src/publish_blocks.rs
@@ -86,6 +86,8 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
     network_globals: Arc<NetworkGlobals<T::EthSpec>>,
 ) -> Result<Response, Rejection> {
     let seen_timestamp = timestamp_now();
+    let block_publishing_delay_for_testing = chain.config.block_publishing_delay;
+    let data_column_publishing_delay_for_testing = chain.config.data_column_publishing_delay;
 
     let (unverified_block, unverified_blobs, is_locally_built_block) = match provenanced_block {
         ProvenancedBlock::Local(block, blobs, _) => (block, blobs, true),
@@ -147,6 +149,14 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
     let should_publish_block = gossip_verified_block_result.is_ok();
 
     if BroadcastValidation::Gossip == validation_level && should_publish_block {
+        if let Some(block_publishing_delay) = block_publishing_delay_for_testing {
+            debug!(
+                log,
+                "Publishing block with artificial delay";
+                "block_publishing_delay" => ?block_publishing_delay
+            );
+            tokio::time::sleep(block_publishing_delay).await;
+        }
         publish_block_p2p(
             block.clone(),
             sender_clone.clone(),
@@ -207,6 +217,23 @@ pub async fn publish_block<T: BeaconChainTypes, B: IntoGossipVerifiedBlock<T>>(
     }
 
     if gossip_verified_columns.iter().map(Option::is_some).count() > 0 {
+        if let Some(data_column_publishing_delay) = data_column_publishing_delay_for_testing {
+            // Subtract the block publishing delay if it is also in use.
+            // Note: if `data_column_publishing_delay` is less than `block_publishing_delay`, the
+            // columns will still be delayed by `block_publishing_delay`. This could be solved by
+            // spawning async tasks, but the limitation is minor and probably not worth
+            // complicating the mainnet code path for.
+            let block_publishing_delay = block_publishing_delay_for_testing.unwrap_or_default();
+            let delay = data_column_publishing_delay.saturating_sub(block_publishing_delay);
+            if !delay.is_zero() {
+                debug!(
+                    log,
+                    "Publishing data columns with artificial delay";
+                    "data_column_publishing_delay" => ?data_column_publishing_delay
+                );
+                tokio::time::sleep(delay).await;
+            }
+        }
         publish_column_sidecars(network_tx, &gossip_verified_columns, &chain).map_err(|_| {
             warp_utils::reject::custom_server_error("unable to publish data column sidecars".into())
         })?;
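The delay interaction described in the comment above boils down to a `saturating_sub`. A minimal, std-only sketch of that behaviour (the function name `effective_column_delay` is illustrative, not part of the patch):

```rust
use std::time::Duration;

/// Illustrative only: the extra delay applied before data columns are
/// published, given that the block publishing delay has already elapsed.
fn effective_column_delay(
    block_delay: Option<Duration>,
    column_delay: Option<Duration>,
) -> Duration {
    column_delay
        .unwrap_or_default()
        .saturating_sub(block_delay.unwrap_or_default())
}

fn main() {
    // Column delay longer than block delay: sleep only for the difference.
    assert_eq!(
        effective_column_delay(Some(Duration::from_secs(2)), Some(Duration::from_secs(5))),
        Duration::from_secs(3)
    );
    // Column delay shorter than block delay: no extra sleep, but the columns
    // are still held back by the block delay, as the comment above notes.
    assert_eq!(
        effective_column_delay(Some(Duration::from_secs(5)), Some(Duration::from_secs(2))),
        Duration::ZERO
    );
}
```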
diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs
index 4c2daecdd34..200de0b14a2 100644
--- a/beacon_node/src/cli.rs
+++ b/beacon_node/src/cli.rs
@@ -1599,5 +1599,30 @@ pub fn cli_app() -> Command {
                 .action(ArgAction::Set)
                 .display_order(0)
         )
+        .arg(
+            Arg::new("delay-block-publishing")
+                .long("delay-block-publishing")
+                .value_name("SECONDS")
+                .action(ArgAction::Set)
+                .help_heading(FLAG_HEADER)
+                .help("TESTING ONLY: Artificially delay block publishing by the specified number of seconds. \
+                    This only works if `BroadcastValidation::Gossip` is used (the default). \
+                    DO NOT USE IN PRODUCTION.")
+                .hide(true)
+                .display_order(0)
+        )
+        .arg(
+            Arg::new("delay-data-column-publishing")
+                .long("delay-data-column-publishing")
+                .value_name("SECONDS")
+                .action(ArgAction::Set)
+                .help_heading(FLAG_HEADER)
+                .help("TESTING ONLY: Artificially delay data column publishing by the specified number of seconds. \
+                    Limitation: If `delay-block-publishing` is also used, data columns will be delayed for a \
+                    minimum of `delay-block-publishing` seconds. \
+                    DO NOT USE IN PRODUCTION.")
+                .hide(true)
+                .display_order(0)
+        )
         .group(ArgGroup::new("enable_http").args(["http", "gui", "staking"]).multiple(true))
 }
diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs
index 24d569bea22..0b9e90717d5 100644
--- a/beacon_node/src/config.rs
+++ b/beacon_node/src/config.rs
@@ -895,6 +895,14 @@ pub fn get_config(
         .max_gossip_aggregate_batch_size =
         clap_utils::parse_required(cli_args, "beacon-processor-aggregate-batch-size")?;
 
+    if let Some(delay) = clap_utils::parse_optional(cli_args, "delay-block-publishing")? {
+        client_config.chain.block_publishing_delay = Some(Duration::from_secs_f64(delay));
+    }
+
+    if let Some(delay) = clap_utils::parse_optional(cli_args, "delay-data-column-publishing")? {
+        client_config.chain.data_column_publishing_delay = Some(Duration::from_secs_f64(delay));
+    }
+
     Ok(client_config)
 }
diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs
index 03314930b9b..283c20cc6ec 100644
--- a/lighthouse/tests/beacon_node.rs
+++ b/lighthouse/tests/beacon_node.rs
@@ -2715,3 +2715,29 @@ fn beacon_node_backend_override() {
         assert_eq!(config.store.backend, BeaconNodeBackend::LevelDb);
     });
 }
+
+#[test]
+fn block_publishing_delay_for_testing() {
+    CommandLineTest::new()
+        .flag("delay-block-publishing", Some("2.5"))
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.chain.block_publishing_delay,
+                Some(Duration::from_secs_f64(2.5f64))
+            );
+        });
+}
+
+#[test]
+fn data_column_publishing_delay_for_testing() {
+    CommandLineTest::new()
+        .flag("delay-data-column-publishing", Some("3.5"))
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(
+                config.chain.data_column_publishing_delay,
+                Some(Duration::from_secs_f64(3.5f64))
+            );
+        });
+}

From 454c7d05c40bdf82c457ed88b47dc6875f0f29ac Mon Sep 17 00:00:00 2001
From: Michael Sproul
Date: Mon, 24 Feb 2025 18:15:32 +1100
Subject: [PATCH 09/10] Remove LC server config from HTTP API (#7017)

Partly addresses

- https://github.com/sigp/lighthouse/issues/6959

Use the `enable_light_client_server` field from the beacon chain config in
the HTTP API. I think we can make this the single source of truth, since the
network crate also has access to the beacon chain config.
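As a rough sketch of the filter pattern added below, assuming `warp` 0.3 and `tokio` (the `ChainConfig` struct here is a minimal stand-in for the real beacon chain config, not the actual type):

```rust
use std::sync::Arc;
use warp::{Filter, Rejection};

/// Minimal stand-in for the beacon chain config (illustrative only).
struct ChainConfig {
    enable_light_client_server: bool,
}

#[tokio::main]
async fn main() {
    let config = Arc::new(ChainConfig {
        enable_light_client_server: false,
    });

    // Evaluate the flag per request rather than at router-construction time.
    // The filter extracts a `Result<(), Rejection>` which each handler can
    // short-circuit on with `?`.
    let enabled = warp::any().then(move || {
        let config = config.clone();
        async move {
            if config.enable_light_client_server {
                Ok(())
            } else {
                Err(warp::reject::not_found())
            }
        }
    });

    let route = warp::path("light_client").and(enabled).and_then(
        |enabled: Result<(), Rejection>| async move {
            enabled?;
            Ok::<_, Rejection>(warp::reply::json(&"light client server enabled"))
        },
    );

    warp::serve(route).run(([127, 0, 0, 1], 3030)).await;
}
```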
---
 beacon_node/http_api/src/lib.rs        | 63 ++++++++++++--------------
 beacon_node/http_api/src/test_utils.rs |  1 -
 beacon_node/src/config.rs              |  3 ---
 lighthouse/tests/beacon_node.rs        |  2 --
 testing/simulator/src/local_network.rs |  1 -
 5 files changed, 29 insertions(+), 41 deletions(-)

diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index 5d75dc8c9a0..b3fd864f6d6 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -155,7 +155,6 @@ pub struct Config {
     pub enable_beacon_processor: bool,
     #[serde(with = "eth2::types::serde_status_code")]
     pub duplicate_block_status_code: StatusCode,
-    pub enable_light_client_server: bool,
     pub target_peers: usize,
 }
 
@@ -171,7 +170,6 @@ impl Default for Config {
             sse_capacity_multiplier: 1,
             enable_beacon_processor: true,
             duplicate_block_status_code: StatusCode::ACCEPTED,
-            enable_light_client_server: true,
             target_peers: 100,
         }
     }
@@ -296,18 +294,6 @@ pub fn prometheus_metrics() -> warp::filters::log::Log<impl Fn(warp::filters::log::Info) + Clone> {
-fn enable(is_enabled: bool) -> impl Filter<Extract = (), Error = warp::Rejection> + Clone {
-    warp::any()
-        .and_then(move || async move {
-            if is_enabled {
-                Ok(())
-            } else {
-                Err(warp::reject::not_found())
-            }
-        })
-        .untuple_one()
-}
-
 /// Creates a server that will serve requests using information from `ctx`.
 ///
 /// The server will shut down gracefully when the `shutdown` future resolves.
@@ -493,6 +479,18 @@ pub fn serve(
         },
     );
 
+    // Create a `warp` filter that returns 404s if the light client server is disabled.
+    let light_client_server_filter =
+        warp::any()
+            .and(chain_filter.clone())
+            .then(|chain: Arc<BeaconChain<T>>| async move {
+                if chain.config.enable_light_client_server {
+                    Ok(())
+                } else {
+                    Err(warp::reject::not_found())
+                }
+            });
+
     // Create a `warp` filter that provides access to the logger.
     let inner_ctx = ctx.clone();
     let log_filter = warp::any().map(move || inner_ctx.log.clone());
@@ -2452,6 +2450,7 @@ pub fn serve(
     let beacon_light_client_path = eth_v1
         .and(warp::path("beacon"))
         .and(warp::path("light_client"))
+        .and(light_client_server_filter)
        .and(chain_filter.clone());
 
     // GET beacon/light_client/bootstrap/{block_root}
@@ -2467,11 +2466,13 @@ pub fn serve(
        .and(warp::path::end())
        .and(warp::header::optional::<api_types::Accept>("accept"))
        .then(
-            |chain: Arc<BeaconChain<T>>,
+            |light_client_server_enabled: Result<(), Rejection>,
+             chain: Arc<BeaconChain<T>>,
             task_spawner: TaskSpawner<T::EthSpec>,
             block_root: Hash256,
             accept_header: Option<api_types::Accept>| {
                task_spawner.blocking_response_task(Priority::P1, move || {
+                    light_client_server_enabled?;
                    get_light_client_bootstrap::<T>(chain, &block_root, accept_header)
                })
            },
@@ -2485,10 +2486,12 @@ pub fn serve(
        .and(warp::path::end())
        .and(warp::header::optional::<api_types::Accept>("accept"))
        .then(
-            |chain: Arc<BeaconChain<T>>,
+            |light_client_server_enabled: Result<(), Rejection>,
+             chain: Arc<BeaconChain<T>>,
             task_spawner: TaskSpawner<T::EthSpec>,
             accept_header: Option<api_types::Accept>| {
                task_spawner.blocking_response_task(Priority::P1, move || {
+                    light_client_server_enabled?;
                    let update = chain
                        .light_client_server_cache
                        .get_latest_optimistic_update()
@@ -2532,10 +2535,12 @@ pub fn serve(
        .and(warp::path::end())
        .and(warp::header::optional::<api_types::Accept>("accept"))
        .then(
-            |chain: Arc<BeaconChain<T>>,
+            |light_client_server_enabled: Result<(), Rejection>,
+             chain: Arc<BeaconChain<T>>,
             task_spawner: TaskSpawner<T::EthSpec>,
             accept_header: Option<api_types::Accept>| {
                task_spawner.blocking_response_task(Priority::P1, move || {
+                    light_client_server_enabled?;
                    let update = chain
                        .light_client_server_cache
                        .get_latest_finality_update()
@@ -2580,11 +2585,13 @@ pub fn serve(
        .and(warp::query::<LightClientUpdatesQuery>())
        .and(warp::header::optional::<api_types::Accept>("accept"))
        .then(
-            |chain: Arc<BeaconChain<T>>,
+            |light_client_server_enabled: Result<(), Rejection>,
+             chain: Arc<BeaconChain<T>>,
             task_spawner: TaskSpawner<T::EthSpec>,
             query: LightClientUpdatesQuery,
             accept_header: Option<api_types::Accept>| {
                task_spawner.blocking_response_task(Priority::P1, move || {
+                    light_client_server_enabled?;
                    get_light_client_updates::<T>(chain, query, accept_header)
                })
            },
@@ -4723,22 +4730,10 @@ pub fn serve(
        .uor(get_lighthouse_database_info)
        .uor(get_lighthouse_block_rewards)
        .uor(get_lighthouse_attestation_performance)
-        .uor(
-            enable(ctx.config.enable_light_client_server)
-                .and(get_beacon_light_client_optimistic_update),
-        )
-        .uor(
-            enable(ctx.config.enable_light_client_server)
-                .and(get_beacon_light_client_finality_update),
-        )
-        .uor(
-            enable(ctx.config.enable_light_client_server)
-                .and(get_beacon_light_client_bootstrap),
-        )
-        .uor(
-            enable(ctx.config.enable_light_client_server)
-                .and(get_beacon_light_client_updates),
-        )
+        .uor(get_beacon_light_client_optimistic_update)
+        .uor(get_beacon_light_client_finality_update)
+        .uor(get_beacon_light_client_bootstrap)
+        .uor(get_beacon_light_client_updates)
        .uor(get_lighthouse_block_packing_efficiency)
        .uor(get_lighthouse_merge_readiness)
        .uor(get_events)
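With the flag disabled, each of these routes now rejects per request. A quick manual check might look like this (illustrative; assumes a local node listening on the default HTTP port 5052):

```
curl -s -o /dev/null -w "%{http_code}\n" \
  http://localhost:5052/eth/v1/beacon/light_client/finality_update
# Expect 404 when chain.config.enable_light_client_server is false.
```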
diff --git a/beacon_node/http_api/src/test_utils.rs b/beacon_node/http_api/src/test_utils.rs
index fbc92a45cce..c692ec999e3 100644
--- a/beacon_node/http_api/src/test_utils.rs
+++ b/beacon_node/http_api/src/test_utils.rs
@@ -249,7 +249,6 @@ pub async fn create_api_server_with_config(
             enabled: true,
             listen_port: port,
             data_dir: std::path::PathBuf::from(DEFAULT_ROOT_DIR),
-            enable_light_client_server: true,
             ..http_config
         },
         chain: Some(chain),
diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs
index 0b9e90717d5..ec217ed71e5 100644
--- a/beacon_node/src/config.rs
+++ b/beacon_node/src/config.rs
@@ -174,9 +174,6 @@ pub fn get_config(
         client_config.http_api.duplicate_block_status_code =
             parse_required(cli_args, "http-duplicate-block-status")?;
-
-        client_config.http_api.enable_light_client_server =
-            !cli_args.get_flag("disable-light-client-server");
     }
 
     if cli_args.get_flag("light-client-server") {
diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs
index 283c20cc6ec..f64f5532903 100644
--- a/lighthouse/tests/beacon_node.rs
+++ b/lighthouse/tests/beacon_node.rs
@@ -2506,7 +2506,6 @@ fn light_client_server_default() {
         .with_config(|config| {
             assert!(config.network.enable_light_client_server);
             assert!(config.chain.enable_light_client_server);
-            assert!(config.http_api.enable_light_client_server);
         });
 }
 
@@ -2539,7 +2538,6 @@ fn light_client_http_server_disabled() {
         .flag("disable-light-client-server", None)
         .run_with_zero_port()
         .with_config(|config| {
-            assert!(!config.http_api.enable_light_client_server);
             assert!(!config.network.enable_light_client_server);
             assert!(!config.chain.enable_light_client_server);
         });
diff --git a/testing/simulator/src/local_network.rs b/testing/simulator/src/local_network.rs
index a95c15c2313..3914d33f936 100644
--- a/testing/simulator/src/local_network.rs
+++ b/testing/simulator/src/local_network.rs
@@ -44,7 +44,6 @@ fn default_client_config(network_params: LocalNetworkParams, genesis_time: u64)
     beacon_config.network.enable_light_client_server = true;
     beacon_config.network.discv5_config.enable_packet_filter = false;
     beacon_config.chain.enable_light_client_server = true;
-    beacon_config.http_api.enable_light_client_server = true;
     beacon_config.chain.optimistic_finalized_sync = false;
     beacon_config.trusted_setup = serde_json::from_reader(get_trusted_setup().as_slice())
         .expect("Trusted setup bytes should be valid");

From 54b4150a6220b0bc61e8e069c6efdeba6153135d Mon Sep 17 00:00:00 2001
From: Jimmy Chen
Date: Mon, 24 Feb 2025 19:30:11 +1100
Subject: [PATCH 10/10] Add test flag to override `SYNC_TOLERANCE_EPOCHS` for
 range sync testing (#7030)

Related to #6880, an issue that's usually observed on local devnets with a
small number of nodes.

When testing range sync, I usually shut down a node for some period of time
and restart it. However, if the node is within `SYNC_TOLERANCE_EPOCHS` (8) of
the head, Lighthouse considers it synced, and it may attempt to produce a
block if requested by a validator. On a local devnet, nodes frequently produce
blocks, so when this happens the node ends up producing a block that reverts
finality and gets disconnected from its peers immediately.

### Usage

Run Lighthouse BN with this flag to override:

```
--sync-tolerance-epochs 0
```
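The check this flag overrides reduces to simple slot arithmetic. A standalone sketch (the function name and the 32 slots-per-epoch constant are assumptions for illustration):

```rust
/// Illustrative only: the "is synced" check from the HTTP API, reduced
/// to its arithmetic.
fn is_synced(head_slot: u64, current_slot: u64, sync_tolerance_epochs: u64) -> bool {
    const SLOTS_PER_EPOCH: u64 = 32; // mainnet value; assumed for this sketch
    let tolerance = sync_tolerance_epochs * SLOTS_PER_EPOCH;
    head_slot + tolerance >= current_slot
}

fn main() {
    // Default tolerance (8 epochs = 256 slots): a node 200 slots behind
    // still reports synced and may produce a block.
    assert!(is_synced(800, 1000, 8));
    // With `--sync-tolerance-epochs 0`, the node must be at the head.
    assert!(!is_synced(999, 1000, 0));
    assert!(is_synced(1000, 1000, 0));
}
```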
---
 beacon_node/http_api/src/lib.rs               |  9 +++++++--
 beacon_node/src/cli.rs                        | 13 +++++++++++++
 beacon_node/src/config.rs                     |  5 ++++-
 lighthouse/tests/beacon_node.rs               | 11 +++++++++++
 scripts/local_testnet/network_params_das.yaml |  4 ++++
 5 files changed, 39 insertions(+), 3 deletions(-)

diff --git a/beacon_node/http_api/src/lib.rs b/beacon_node/http_api/src/lib.rs
index b3fd864f6d6..01dd6bea5f8 100644
--- a/beacon_node/http_api/src/lib.rs
+++ b/beacon_node/http_api/src/lib.rs
@@ -112,7 +112,7 @@ const API_PREFIX: &str = "eth";
 ///
 /// This helps prevent attacks where nodes can convince us that we're syncing some non-existent
 /// finalized head.
-const SYNC_TOLERANCE_EPOCHS: u64 = 8;
+const DEFAULT_SYNC_TOLERANCE_EPOCHS: u64 = 8;
 
 /// A custom type which allows for both unsecured and TLS-enabled HTTP servers.
 type HttpServer = (SocketAddr, Pin<Box<dyn Future<Output = ()> + Send>>);
@@ -156,6 +156,7 @@ pub struct Config {
     #[serde(with = "eth2::types::serde_status_code")]
     pub duplicate_block_status_code: StatusCode,
     pub target_peers: usize,
+    pub sync_tolerance_epochs: Option<u64>,
 }
 
 impl Default for Config {
@@ -171,6 +172,7 @@ impl Default for Config {
             enable_beacon_processor: true,
             duplicate_block_status_code: StatusCode::ACCEPTED,
             target_peers: 100,
+            sync_tolerance_epochs: None,
         }
     }
 }
@@ -459,7 +461,10 @@ pub fn serve(
                     )
                 })?;
 
-            let tolerance = SYNC_TOLERANCE_EPOCHS * T::EthSpec::slots_per_epoch();
+            let sync_tolerance_epochs = config
+                .sync_tolerance_epochs
+                .unwrap_or(DEFAULT_SYNC_TOLERANCE_EPOCHS);
+            let tolerance = sync_tolerance_epochs * T::EthSpec::slots_per_epoch();
 
             if head_slot + tolerance >= current_slot {
                 Ok(())
diff --git a/beacon_node/src/cli.rs b/beacon_node/src/cli.rs
index 200de0b14a2..92e46e5bb64 100644
--- a/beacon_node/src/cli.rs
+++ b/beacon_node/src/cli.rs
@@ -1509,6 +1509,19 @@ pub fn cli_app() -> Command {
             .help_heading(FLAG_HEADER)
             .display_order(0)
         )
+        .arg(
+            Arg::new("sync-tolerance-epochs")
+                .long("sync-tolerance-epochs")
+                .help("Overrides the default SYNC_TOLERANCE_EPOCHS. This flag is not intended \
+                    for production and MUST only be used for testing. It is primarily used \
+                    for testing range sync, to prevent the node from producing a block before \
+                    it is synced with the network, which may result in the node getting \
+                    disconnected from its peers immediately.")
+                .hide(true)
+                .requires("enable_http")
+                .action(ArgAction::Set)
+                .display_order(0)
+        )
         .arg(
             Arg::new("gui")
                 .long("gui")
diff --git a/beacon_node/src/config.rs b/beacon_node/src/config.rs
index ec217ed71e5..292385c27da 100644
--- a/beacon_node/src/config.rs
+++ b/beacon_node/src/config.rs
@@ -8,7 +8,7 @@ use beacon_chain::graffiti_calculator::GraffitiOrigin;
 use beacon_chain::TrustedSetup;
 use clap::{parser::ValueSource, ArgMatches, Id};
 use clap_utils::flags::DISABLE_MALLOC_TUNING_FLAG;
-use clap_utils::{parse_flag, parse_required};
+use clap_utils::{parse_flag, parse_optional, parse_required};
 use client::{ClientConfig, ClientGenesis};
 use directory::{DEFAULT_BEACON_NODE_DIR, DEFAULT_NETWORK_DIR, DEFAULT_ROOT_DIR};
 use environment::RuntimeContext;
@@ -174,6 +174,9 @@ pub fn get_config(
 
         client_config.http_api.duplicate_block_status_code =
             parse_required(cli_args, "http-duplicate-block-status")?;
+
+        client_config.http_api.sync_tolerance_epochs =
+            parse_optional(cli_args, "sync-tolerance-epochs")?;
     }
 
     if cli_args.get_flag("light-client-server") {
diff --git a/lighthouse/tests/beacon_node.rs b/lighthouse/tests/beacon_node.rs
index f64f5532903..415629024a3 100644
--- a/lighthouse/tests/beacon_node.rs
+++ b/lighthouse/tests/beacon_node.rs
@@ -2543,6 +2543,17 @@ fn light_client_http_server_disabled() {
         });
 }
 
+#[test]
+fn sync_tolerance_epochs() {
+    CommandLineTest::new()
+        .flag("http", None)
+        .flag("sync-tolerance-epochs", Some("0"))
+        .run_with_zero_port()
+        .with_config(|config| {
+            assert_eq!(config.http_api.sync_tolerance_epochs, Some(0));
+        });
+}
+
 #[test]
 fn gui_flag() {
     CommandLineTest::new()
diff --git a/scripts/local_testnet/network_params_das.yaml b/scripts/local_testnet/network_params_das.yaml
index 030aa2b8200..80b4bc95c60 100644
--- a/scripts/local_testnet/network_params_das.yaml
+++ b/scripts/local_testnet/network_params_das.yaml
@@ -4,11 +4,15 @@ participants:
     cl_extra_params:
       - --subscribe-all-data-column-subnets
      - --subscribe-all-subnets
+      # Note: useful for testing range sync (only produce block if node is in sync to prevent forking)
+      - --sync-tolerance-epochs=0
      - --target-peers=3
    count: 2
  - cl_type: lighthouse
    cl_image: lighthouse:local
    cl_extra_params:
+      # Note: useful for testing range sync (only produce block if node is in sync to prevent forking)
+      - --sync-tolerance-epochs=0
      - --target-peers=3
    count: 2
 network_params: